diff --git a/Makefile b/Makefile index aea23b27f..450d6f89d 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ else endif include hack/make/bootstrap.mk +include hack/make/dep_atlas.mk include hack/make/dep_ent.mk include hack/make/dep_go_enums.mk include hack/make/dep_goa.mk @@ -30,7 +31,8 @@ include hack/make/dep_tparse.mk include hack/make/dep_workflowcheck.mk # Lazy-evaluated list of tools. -TOOLS = $(ENT) \ +TOOLS = $(ATLAS) \ + $(ENT) \ $(GO_ENUM) \ $(GOA) \ $(GOLANGCI_LINT) \ @@ -68,6 +70,14 @@ TEST_IGNORED_PACKAGES = $(filter $(IGNORED_PACKAGES),$(PACKAGES)) export PATH:=$(GOBIN):$(PATH) +atlas-hash: $(ATLAS) # @HELP Recalculate the migration hashes. + atlas migrate hash \ + --dir="file://internal/db/migrations" \ + --dir-format="atlas" + atlas migrate hash \ + --dir="file://internal/storage/persistence/migrations" \ + --dir-format="atlas" + db: # @HELP Opens the MySQL shell connected to the enduro development database. db: mysql -h127.0.0.1 -P3306 -uroot -proot123 enduro @@ -106,8 +116,8 @@ gen-enums: # @HELP Generate go-enum assets. gen-enums: ENUM_FLAGS = --names --template=$(CURDIR)/hack/make/enums.tmpl gen-enums: $(GO_ENUM) go-enum $(ENUM_FLAGS) \ - -f internal/enums/package_type.go \ - -f internal/enums/pkg_status.go \ + -f internal/enums/sip_type.go \ + -f internal/enums/sip_status.go \ -f internal/enums/preprocessing_task_outcome.go \ -f internal/enums/pres_action_status.go \ -f internal/enums/pres_action_type.go \ diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go index c8d33260f..6d2399b5f 100644 --- a/cmd/migrate/main.go +++ b/cmd/migrate/main.go @@ -1,14 +1,26 @@ -// Example: +// Example (ingest): +// +// 1. Make changes to schema files (internal/persistence/ent/schema), +// 2. Re-generate (make gen-ent), +// 3. Use an empty MySQL database, +// 4. Run: +// $ go run ./cmd/migrate/ \ +// --db="ingest" \ +// --dsn="mysql://root:root123@tcp(localhost:3306)/enduro_migrate" \ +// --path="./internal/db/migrations" \ +// --name="changes" +// +// Example (storage): // // 1. Make changes to schema files (internal/storage/persistence/ent/schema), // 2. Re-generate (make gen-ent), -// 3. Drop any existing database tables or delete and re-create the database, +// 3. Use an empty MySQL database, // 4. 
Run: // $ go run ./cmd/migrate/ \ -// --config="./enduro.toml" \ -// --dsn="mysql://enduro:enduro123@tcp(localhost:3306)/enduro_storage" \ -// --name="init" \ -// --path="./internal/storage/persistence/migrations" +// --db="storage" \ +// --dsn="mysql://root:root123@tcp(localhost:3306)/enduro_migrate" \ +// --path="./internal/storage/persistence/migrations" \ +// --name="changes" package main import ( @@ -16,7 +28,6 @@ import ( "fmt" "log" "os" - "path/filepath" "strings" "ariga.io/atlas/sql/sqltool" @@ -25,40 +36,38 @@ import ( "github.com/go-sql-driver/mysql" "github.com/spf13/pflag" - "github.com/artefactual-sdps/enduro/internal/config" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/migrate" + ingest_migrate "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/migrate" + storage_migrate "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/migrate" ) func main() { p := pflag.NewFlagSet("migrate", pflag.ExitOnError) - p.String("config", "", "Configuration file") + p.String("db", "", "Enduro database ('ingest' or 'storage')") p.String("dsn", "", "MySQL DSN") p.String("path", "", "Migration directory") p.String("name", "changes", "Migration name") _ = p.Parse(os.Args[1:]) - path, _ := p.GetString("path") - if path == "" { - wd, err := os.Getwd() - if err != nil { - os.Exit(1) - } - // Guessing that running it from the root folder. - path = filepath.Join(wd, "internal/storage/persistence/migrations") + db, _ := p.GetString("db") + if db == "" { + fmt.Printf("--db flag is missing") + os.Exit(1) + } + if db != "ingest" && db != "storage" { + fmt.Printf("--db flag has an unexpected value (use 'ingest' or 'storage')") + os.Exit(1) } - var cfg config.Configuration - configFile, _ := p.GetString("config") - _, _, err := config.Read(&cfg, configFile) - if err != nil { - fmt.Printf("Failed to read configuration: %v\n", err) + DSN, _ := p.GetString("dsn") + if DSN == "" { + fmt.Printf("--dsn flag is missing") os.Exit(1) } - DSN := cfg.Storage.Database.DSN - flagDSN, _ := p.GetString("dsn") - if flagDSN != "" { - DSN = flagDSN + path, _ := p.GetString("path") + if path == "" { + fmt.Printf("--path flag is missing") + os.Exit(1) } // MySQL's DSN format is not accepted by Ent, convert as needed (remove Net). @@ -89,11 +98,17 @@ func main() { schema.WithDir(dir), // provide migration directory schema.WithMigrationMode(schema.ModeReplay), // provide migration mode schema.WithDialect(dialect.MySQL), // Ent dialect to use + schema.WithDropIndex(true), + schema.WithDropColumn(true), } // Generate migrations using Atlas support for TiDB (note the Ent dialect option passed above). name, _ := p.GetString("name") - err = migrate.NamedDiff(ctx, entDSN, name, opts...) + if db == "ingest" { + err = ingest_migrate.NamedDiff(ctx, entDSN, name, opts...) + } else { + err = storage_migrate.NamedDiff(ctx, entDSN, name, opts...) 
+ } if err != nil { log.Fatalf("failed generating migration file: %v", err) } diff --git a/go.mod b/go.mod index 26ca5e9d9..c5601f74a 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/artefactual-sdps/enduro go 1.23.6 require ( - ariga.io/atlas v0.19.2 + ariga.io/atlas v0.31.0 ariga.io/sqlcomment v0.1.0 buf.build/gen/go/artefactual/a3m/grpc/go v1.3.0-20230508184533-2e9432075630.2 buf.build/gen/go/artefactual/a3m/protocolbuffers/go v1.31.0-20230508184533-2e9432075630.2 @@ -95,6 +95,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect github.com/aws/smithy-go v1.20.3 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/bodgit/plumbing v1.2.0 // indirect github.com/bodgit/sevenzip v1.3.0 // indirect github.com/bodgit/windows v1.0.0 // indirect @@ -158,7 +159,8 @@ require ( github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - github.com/zclconf/go-cty v1.14.1 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect diff --git a/go.sum b/go.sum index 36176ef2c..220a2e88e 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -ariga.io/atlas v0.19.2 h1:ulK06d4joEaMP06HNNPxdpD8dFgZGzjzjk+Mb5VfF08= -ariga.io/atlas v0.19.2/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE= +ariga.io/atlas v0.31.0 h1:Nw6/Jdc7OpZfiy6oh/dJAYPp5XxGYvMTWLOUutwWjeY= +ariga.io/atlas v0.31.0/go.mod h1:J3chwsQAgjDF6Ostz7JmJJRTCbtqIupUbVR/gqZrMiA= ariga.io/sqlcomment v0.1.0 h1:8kQPlVe3sXpTloEFlpX5dhFAXB28i6rwq9ktqqnPx70= ariga.io/sqlcomment v0.1.0/go.mod h1:NT1IZMfBTQl1MUU5wgVONmnDqFRqtZrdDRgAXfc1g5k= buf.build/gen/go/artefactual/a3m/grpc/go v1.3.0-20230508184533-2e9432075630.2 h1:+AADIVDD4GabjtfMHO/M9WIQBc/0IXN7ddYuN8VU8tc= @@ -487,6 +487,8 @@ github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/bodgit/plumbing v1.2.0 h1:gg4haxoKphLjml+tgnecR4yLBV5zo4HAZGCtAh3xCzM= github.com/bodgit/plumbing v1.2.0/go.mod h1:b9TeRi7Hvc6Y05rjm8VML3+47n4XTZPtQ/5ghqic2n8= github.com/bodgit/sevenzip v1.3.0 h1:1ljgELgtHqvgIp8W8kgeEGHIWP4ch3xGI8uOBZgLVKY= @@ -942,8 +944,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0 
h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= go.artefactual.dev/amclient v0.4.1-0.20240705155055-0c5abef5207c h1:kMgKuV9yx0LTQpkWdPxPC4GHAKMS9pDQ0x3CD8SmLfI= go.artefactual.dev/amclient v0.4.1-0.20240705155055-0c5abef5207c/go.mod h1:oAOlZHC78IWjWDCqRM1/8K1x/WYmRUL3peujKCdoXaA= go.artefactual.dev/tools v0.14.0 h1:ESLbemsnkdIPmYXtz0uZTcPqVnTUXIEZd9DSTRyTZqY= diff --git a/hack/make/dep_atlas.mk b/hack/make/dep_atlas.mk new file mode 100644 index 000000000..7a0376daf --- /dev/null +++ b/hack/make/dep_atlas.mk @@ -0,0 +1,22 @@ +$(call _assert_var,MAKEDIR) +$(call _conditional_include,$(MAKEDIR)/base.mk) +$(call _assert_var,UNAME_OS) +$(call _assert_var,UNAME_ARCH2) +$(call _assert_var,CACHE_VERSIONS) +$(call _assert_var,CACHE_BIN) + +ATLAS_VERSION ?= 0.31.0 + +ATLAS := $(CACHE_VERSIONS)/atlas/$(ATLAS_VERSION) +$(ATLAS): + rm -f $(CACHE_BIN)/atlas + mkdir -p $(CACHE_BIN) + $(eval TMP := $(shell mktemp -d)) + $(eval OS := $(shell echo $(UNAME_OS) | tr A-Z a-z)) + curl -sSL \ + https://release.ariga.io/atlas/atlas-$(OS)-$(UNAME_ARCH2)-v$(ATLAS_VERSION) \ + > $(CACHE_BIN)/atlas + chmod +x $(CACHE_BIN)/atlas + rm -rf $(dir $(ATLAS)) + mkdir -p $(dir $(ATLAS)) + touch $(ATLAS) diff --git a/internal/am/job_tracker_test.go b/internal/am/job_tracker_test.go index f3a73a6eb..fd0478459 100644 --- a/internal/am/job_tracker_test.go +++ b/internal/am/job_tracker_test.go @@ -211,7 +211,7 @@ func TestConvertJobToPreservationTask(t *testing.T) { want: datatypes.PreservationTask{ TaskID: "f60018ac-da79-4769-9509-c6c41d5efe7e", Name: "Move to processing directory", - Status: enums.PreservationTaskStatus(enums.PackageStatusDone), + Status: enums.PreservationTaskStatusDone, StartedAt: sql.NullTime{ Time: time.Date(2024, time.January, 18, 1, 27, 49, 0, time.UTC), Valid: true, @@ -251,7 +251,7 @@ func TestConvertJobToPreservationTask(t *testing.T) { want: datatypes.PreservationTask{ TaskID: "c2128d39-2ace-47c5-8cac-39ded8d9c9ef", Name: "Verify SIP compliance", - Status: enums.PreservationTaskStatus(enums.PackageStatusInProgress), + Status: enums.PreservationTaskStatusInProgress, StartedAt: sql.NullTime{ Time: time.Date(2024, time.January, 18, 1, 27, 49, 0, time.UTC), Valid: true, diff --git a/internal/api/design/package_.go b/internal/api/design/package_.go index 18bc75284..b8392658f 100644 --- a/internal/api/design/package_.go +++ b/internal/api/design/package_.go @@ -266,7 +266,7 @@ var _ = Service("package", func() { }) var EnumPackageStatus = func() { - Enum(enums.PackageStatusInterfaces()...) + Enum(enums.SIPStatusInterfaces()...) 
} var Package_ = Type("Package", func() { @@ -275,7 +275,7 @@ var Package_ = Type("Package", func() { TypedAttributeUUID("location_id", "Identifier of storage location") Attribute("status", String, "Status of the package", func() { EnumPackageStatus() - Default(enums.PackageStatusNew.String()) + Default(enums.SIPStatusNew.String()) }) AttributeUUID("workflow_id", "Identifier of processing workflow") AttributeUUID("run_id", "Identifier of latest processing workflow run") diff --git a/internal/datatypes/package_.go b/internal/datatypes/package_.go deleted file mode 100644 index f5bdc5847..000000000 --- a/internal/datatypes/package_.go +++ /dev/null @@ -1,64 +0,0 @@ -package datatypes - -import ( - "database/sql" - "time" - - "github.com/google/uuid" - "go.artefactual.dev/tools/ref" - - goapackage "github.com/artefactual-sdps/enduro/internal/api/gen/package_" - "github.com/artefactual-sdps/enduro/internal/db" - "github.com/artefactual-sdps/enduro/internal/enums" -) - -// Package represents a package in the package table. -type Package struct { - ID int `db:"id"` - Name string `db:"name"` - WorkflowID string `db:"workflow_id"` - RunID string `db:"run_id"` - AIPID uuid.NullUUID `db:"aip_id"` // Nullable. - LocationID uuid.NullUUID `db:"location_id"` // Nullable. - Status enums.PackageStatus `db:"status"` - - // It defaults to CURRENT_TIMESTAMP(6) so populated as soon as possible. - CreatedAt time.Time `db:"created_at"` - - // Nullable, populated as soon as processing starts. - StartedAt sql.NullTime `db:"started_at"` - - // Nullable, populated as soon as ingest completes. - CompletedAt sql.NullTime `db:"completed_at"` -} - -// Goa returns the API representation of the package. -func (p *Package) Goa() *goapackage.EnduroStoredPackage { - if p == nil { - return nil - } - - var id uint - if p.ID > 0 { - id = uint(p.ID) // #nosec G115 -- range validated. - } - - col := goapackage.EnduroStoredPackage{ - ID: id, - Name: db.FormatOptionalString(p.Name), - WorkflowID: db.FormatOptionalString(p.WorkflowID), - RunID: db.FormatOptionalString(p.RunID), - Status: p.Status.String(), - CreatedAt: db.FormatTime(p.CreatedAt), - StartedAt: db.FormatOptionalTime(p.StartedAt), - CompletedAt: db.FormatOptionalTime(p.CompletedAt), - } - if p.AIPID.Valid { - col.AipID = ref.New(p.AIPID.UUID.String()) - } - if p.LocationID.Valid { - col.LocationID = &p.LocationID.UUID - } - - return &col -} diff --git a/internal/datatypes/preservation_action.go b/internal/datatypes/preservation_action.go index a8aafece0..e98eb145a 100644 --- a/internal/datatypes/preservation_action.go +++ b/internal/datatypes/preservation_action.go @@ -14,5 +14,5 @@ type PreservationAction struct { Status enums.PreservationActionStatus `db:"status"` StartedAt sql.NullTime `db:"started_at"` CompletedAt sql.NullTime `db:"completed_at"` - PackageID int `db:"package_id"` + SIPID int `db:"sip_id"` } diff --git a/internal/datatypes/sip.go b/internal/datatypes/sip.go new file mode 100644 index 000000000..1a45eee29 --- /dev/null +++ b/internal/datatypes/sip.go @@ -0,0 +1,64 @@ +package datatypes + +import ( + "database/sql" + "time" + + "github.com/google/uuid" + "go.artefactual.dev/tools/ref" + + goapackage "github.com/artefactual-sdps/enduro/internal/api/gen/package_" + "github.com/artefactual-sdps/enduro/internal/db" + "github.com/artefactual-sdps/enduro/internal/enums" +) + +// SIP represents a SIP in the sip table. 
+type SIP struct { + ID int `db:"id"` + Name string `db:"name"` + WorkflowID string `db:"workflow_id"` + RunID string `db:"run_id"` + AIPID uuid.NullUUID `db:"aip_id"` // Nullable. + LocationID uuid.NullUUID `db:"location_id"` // Nullable. + Status enums.SIPStatus `db:"status"` + + // It defaults to CURRENT_TIMESTAMP(6) so populated as soon as possible. + CreatedAt time.Time `db:"created_at"` + + // Nullable, populated as soon as processing starts. + StartedAt sql.NullTime `db:"started_at"` + + // Nullable, populated as soon as ingest completes. + CompletedAt sql.NullTime `db:"completed_at"` +} + +// Goa returns the API representation of the SIP. +func (s *SIP) Goa() *goapackage.EnduroStoredPackage { + if s == nil { + return nil + } + + var id uint + if s.ID > 0 { + id = uint(s.ID) // #nosec G115 -- range validated. + } + + col := goapackage.EnduroStoredPackage{ + ID: id, + Name: db.FormatOptionalString(s.Name), + WorkflowID: db.FormatOptionalString(s.WorkflowID), + RunID: db.FormatOptionalString(s.RunID), + Status: s.Status.String(), + CreatedAt: db.FormatTime(s.CreatedAt), + StartedAt: db.FormatOptionalTime(s.StartedAt), + CompletedAt: db.FormatOptionalTime(s.CompletedAt), + } + if s.AIPID.Valid { + col.AipID = ref.New(s.AIPID.UUID.String()) + } + if s.LocationID.Valid { + col.LocationID = &s.LocationID.UUID + } + + return &col +} diff --git a/internal/db/migrations/20250207193001_rename_package_table.down.sql b/internal/db/migrations/20250207193001_rename_package_table.down.sql new file mode 100644 index 000000000..dfc9da021 --- /dev/null +++ b/internal/db/migrations/20250207193001_rename_package_table.down.sql @@ -0,0 +1,8 @@ +-- drop foreign key constraint from "preservation_action" table +ALTER TABLE `preservation_action` DROP FOREIGN KEY `preservation_action_ibfk_1`; +-- reverse: rename column from "package_id" to "sip_id" +ALTER TABLE `preservation_action` CHANGE COLUMN `sip_id` `package_id` INT UNSIGNED NOT NULL; +-- reverse: rename table from "package" to "sip" +RENAME TABLE `sip` TO `package`; +-- recreate foreign key constraint +ALTER TABLE `preservation_action` ADD CONSTRAINT `preservation_action_ibfk_1` FOREIGN KEY (`package_id`) REFERENCES `package` (`id`) ON DELETE CASCADE; diff --git a/internal/db/migrations/20250207193001_rename_package_table.up.sql b/internal/db/migrations/20250207193001_rename_package_table.up.sql new file mode 100644 index 000000000..1efa794d9 --- /dev/null +++ b/internal/db/migrations/20250207193001_rename_package_table.up.sql @@ -0,0 +1,8 @@ +-- drop foreign key constraint from "preservation_action" table +ALTER TABLE `preservation_action` DROP FOREIGN KEY `preservation_action_ibfk_1`; +-- rename column from "package_id" to "sip_id" +ALTER TABLE `preservation_action` CHANGE COLUMN `package_id` `sip_id` INT UNSIGNED NOT NULL; +-- rename table from "package" to "sip" +RENAME TABLE `package` TO `sip`; +-- recreate foreign key constraint +ALTER TABLE `preservation_action` ADD CONSTRAINT `preservation_action_ibfk_1` FOREIGN KEY (`sip_id`) REFERENCES `sip` (`id`) ON DELETE CASCADE; diff --git a/internal/db/migrations/20250210184720_use_atlas.down.sql b/internal/db/migrations/20250210184720_use_atlas.down.sql new file mode 100644 index 000000000..2427df07b --- /dev/null +++ b/internal/db/migrations/20250210184720_use_atlas.down.sql @@ -0,0 +1,49 @@ +-- drop foreign key constraint from "preservation_action" table +ALTER TABLE `preservation_action` DROP FOREIGN KEY `preservation_action_sip_preservation_actions`; +-- reverse: modify "sip" table +ALTER TABLE 
`sip` COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `id` int unsigned NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `name` varchar(2048) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `workflow_id` varchar(255) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `run_id` varchar(36) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `aip_id` varchar(36) NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `location_id` varchar(36) NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `created_at` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + MODIFY COLUMN `started_at` timestamp(6) NULL, + MODIFY COLUMN `completed_at` timestamp(6) NULL, + DROP INDEX `sip_aip_id_idx`, + DROP INDEX `sip_created_at_idx`, + DROP INDEX `sip_location_id_idx`, + DROP INDEX `sip_name_idx`, + DROP INDEX `sip_started_at_idx`, + DROP INDEX `sip_status_idx`, + ADD INDEX `package_aip_id_idx` (`aip_id`), + ADD INDEX `package_created_at_idx` (`created_at`), + ADD INDEX `package_location_id_idx` (`location_id`), + ADD INDEX `package_name_idx` (`name` (50)), + ADD INDEX `package_started_at_idx` (`started_at`), + ADD INDEX `package_status_idx` (`status`); +-- drop foreign key constraint from "preservation_task" table +ALTER TABLE `preservation_task` DROP FOREIGN KEY `preservation_task_preservation_action_tasks`; +-- reverse: modify "preservation_action" table +ALTER TABLE `preservation_action` COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `id` int unsigned NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `workflow_id` varchar(255) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `started_at` timestamp(6) NULL, + MODIFY COLUMN `completed_at` timestamp(6) NULL, + MODIFY COLUMN `sip_id` int unsigned NOT NULL, + DROP INDEX `preservation_action_sip_preservation_actions`, + ADD CONSTRAINT `preservation_action_ibfk_1` + FOREIGN KEY (`sip_id`) REFERENCES `sip` (`id`) ON UPDATE NO ACTION ON DELETE CASCADE; +-- reverse: modify "preservation_task" table +ALTER TABLE `preservation_task` COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `id` int unsigned NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `task_id` varchar(36) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `name` varchar(2048) NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `started_at` timestamp(6) NULL, + MODIFY COLUMN `completed_at` timestamp(6) NULL, + MODIFY COLUMN `note` longtext NOT NULL COLLATE utf8mb4_0900_ai_ci, + MODIFY COLUMN `preservation_action_id` int unsigned NOT NULL, + DROP INDEX `preservation_task_preservation_action_tasks`, + ADD CONSTRAINT `preservation_task_ibfk_1` + FOREIGN KEY (`preservation_action_id`) REFERENCES `preservation_action` (`id`) ON UPDATE NO ACTION ON DELETE CASCADE; diff --git a/internal/db/migrations/20250210184720_use_atlas.up.sql b/internal/db/migrations/20250210184720_use_atlas.up.sql new file mode 100644 index 000000000..53fc5998d --- /dev/null +++ b/internal/db/migrations/20250210184720_use_atlas.up.sql @@ -0,0 +1,49 @@ +-- drop foreign key constraint from "preservation_action" table +ALTER TABLE `preservation_action` DROP FOREIGN KEY `preservation_action_ibfk_1`; +-- modify "sip" table +ALTER TABLE `sip` COLLATE utf8mb4_bin, + MODIFY COLUMN `id` bigint NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `name` varchar(2048) NOT NULL, + MODIFY COLUMN `workflow_id` varchar(255) NOT NULL, + MODIFY COLUMN `run_id` char(36) NOT NULL, + MODIFY COLUMN `aip_id` char(36) NULL, + MODIFY COLUMN `location_id` char(36) NULL, + MODIFY COLUMN `created_at` timestamp NOT NULL, + MODIFY COLUMN `started_at` timestamp NULL, + MODIFY COLUMN `completed_at` timestamp 
NULL, + DROP INDEX `package_aip_id_idx`, + DROP INDEX `package_created_at_idx`, + DROP INDEX `package_location_id_idx`, + DROP INDEX `package_name_idx`, + DROP INDEX `package_started_at_idx`, + DROP INDEX `package_status_idx`, + ADD INDEX `sip_aip_id_idx` (`aip_id`), + ADD INDEX `sip_created_at_idx` (`created_at`), + ADD INDEX `sip_location_id_idx` (`location_id`), + ADD INDEX `sip_name_idx` (`name` (50)), + ADD INDEX `sip_started_at_idx` (`started_at`), + ADD INDEX `sip_status_idx` (`status`); +-- drop foreign key constraint from "preservation_task" table +ALTER TABLE `preservation_task` DROP FOREIGN KEY `preservation_task_ibfk_1`; +-- modify "preservation_action" table +ALTER TABLE `preservation_action` COLLATE utf8mb4_bin, + MODIFY COLUMN `id` bigint NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `workflow_id` varchar(255) NOT NULL, + MODIFY COLUMN `started_at` timestamp NULL, + MODIFY COLUMN `completed_at` timestamp NULL, + MODIFY COLUMN `sip_id` bigint NOT NULL, + ADD INDEX `preservation_action_sip_preservation_actions` (`sip_id`), + ADD CONSTRAINT `preservation_action_sip_preservation_actions` + FOREIGN KEY (`sip_id`) REFERENCES `sip` (`id`) ON UPDATE NO ACTION ON DELETE CASCADE; +-- modify "preservation_task" table +ALTER TABLE `preservation_task` COLLATE utf8mb4_bin, + MODIFY COLUMN `id` bigint NOT NULL AUTO_INCREMENT, + MODIFY COLUMN `task_id` char(36) NOT NULL, + MODIFY COLUMN `name` varchar(2048) NOT NULL, + MODIFY COLUMN `started_at` timestamp NULL, + MODIFY COLUMN `completed_at` timestamp NULL, + MODIFY COLUMN `note` longtext NOT NULL, + MODIFY COLUMN `preservation_action_id` bigint NOT NULL, + ADD INDEX `preservation_task_preservation_action_tasks` (`preservation_action_id`), + ADD CONSTRAINT `preservation_task_preservation_action_tasks` + FOREIGN KEY (`preservation_action_id`) REFERENCES `preservation_action` (`id`) ON UPDATE NO ACTION ON DELETE CASCADE; diff --git a/internal/db/migrations/atlas.sum b/internal/db/migrations/atlas.sum new file mode 100644 index 000000000..c7352a02c --- /dev/null +++ b/internal/db/migrations/atlas.sum @@ -0,0 +1,9 @@ +h1:2cHdzZbV3R0R8RyylKlOCm7fnCdnpEBzpvvPcaCpWiA= +1570659451_init.down.sql h1:hZkExHm7J+Njznl8jVlCA3ePmlKfnCi3h0UFLvgU0kU= +1570659451_init.up.sql h1:/Bi8TK4Lt/bWcuxnQMqAvRO3e/NhFDQusiR3fjk5Z3c= +1710442322_nullable_aip_id.down.sql h1:naFYM+Aybp/Vz8EwJOG7fRf/CWmV87qwylR4YR/76XE= +1710442322_nullable_aip_id.up.sql h1:POGPYt8Mt0rWGMfns5JtXF4WAadA+q2Lpk9i69BanKc= +20250207193001_rename_package_table.down.sql h1:g/QV6X4SuRUBJVJ3E6r76T4F3LxEJZ7Y/JbITaenHIE= +20250207193001_rename_package_table.up.sql h1:W1lSrRpAw4X8n9A/qlaD1IzRv67SZmOXUWNe5ivX/bQ= +20250210184720_use_atlas.down.sql h1:NnSR/0LHahSc75I4nR+3Rksn7noiKztEZB0kK3yIvtQ= +20250210184720_use_atlas.up.sql h1:2OwM5TbYcVes4N2X2wFXUxtGIgab/EQlrZ0Eox3PYrU= diff --git a/internal/enums/package_type_enum.go b/internal/enums/package_type_enum.go deleted file mode 100644 index 966111a4b..000000000 --- a/internal/enums/package_type_enum.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by go-enum DO NOT EDIT. -// Version: 0.6.0 -// Revision: 919e61c0174b91303753ee3898569a01abb32c97 -// Build Date: 2023-12-18T15:54:43Z -// Built By: goreleaser - -package enums - -import ( - "fmt" - "strings" -) - -const ( - // PackageTypeUnknown is a PackageType of type Unknown. - PackageTypeUnknown PackageType = iota - // PackageTypeBagIt is a PackageType of type BagIt. - PackageTypeBagIt - // PackageTypeArchivematicaStandardTransfer is a PackageType of type Archivematica Standard Transfer. 
- PackageTypeArchivematicaStandardTransfer -) - -var ErrInvalidPackageType = fmt.Errorf("not a valid PackageType, try [%s]", strings.Join(_PackageTypeNames, ", ")) - -const _PackageTypeName = "UnknownBagItArchivematica Standard Transfer" - -var _PackageTypeNames = []string{ - _PackageTypeName[0:7], - _PackageTypeName[7:12], - _PackageTypeName[12:43], -} - -// PackageTypeNames returns a list of possible string values of PackageType. -func PackageTypeNames() []string { - tmp := make([]string, len(_PackageTypeNames)) - copy(tmp, _PackageTypeNames) - return tmp -} - -var _PackageTypeMap = map[PackageType]string{ - PackageTypeUnknown: _PackageTypeName[0:7], - PackageTypeBagIt: _PackageTypeName[7:12], - PackageTypeArchivematicaStandardTransfer: _PackageTypeName[12:43], -} - -// String implements the Stringer interface. -func (x PackageType) String() string { - if str, ok := _PackageTypeMap[x]; ok { - return str - } - return fmt.Sprintf("PackageType(%d)", x) -} - -// IsValid provides a quick way to determine if the typed value is -// part of the allowed enumerated values -func (x PackageType) IsValid() bool { - _, ok := _PackageTypeMap[x] - return ok -} - -var _PackageTypeValue = map[string]PackageType{ - _PackageTypeName[0:7]: PackageTypeUnknown, - _PackageTypeName[7:12]: PackageTypeBagIt, - _PackageTypeName[12:43]: PackageTypeArchivematicaStandardTransfer, -} - -// ParsePackageType attempts to convert a string to a PackageType. -func ParsePackageType(name string) (PackageType, error) { - if x, ok := _PackageTypeValue[name]; ok { - return x, nil - } - return PackageType(0), fmt.Errorf("%s is %w", name, ErrInvalidPackageType) -} - -// Values implements the entgo.io/ent/schema/field EnumValues interface. -func (x PackageType) Values() []string { - return PackageTypeNames() -} - -// PackageTypeInterfaces returns an interface list of possible values of PackageType. -func PackageTypeInterfaces() []interface{} { - var tmp []interface{} - for _, v := range _PackageTypeNames { - tmp = append(tmp, v) - } - return tmp -} - -// ParsePackageTypeWithDefault attempts to convert a string to a ContentType. -// It returns the default value if name is empty. -func ParsePackageTypeWithDefault(name string) (PackageType, error) { - if name == "" { - return _PackageTypeValue[_PackageTypeNames[0]], nil - } - if x, ok := _PackageTypeValue[name]; ok { - return x, nil - } - var e PackageType - return e, fmt.Errorf("%s is not a valid PackageType, try [%s]", name, strings.Join(_PackageTypeNames, ", ")) -} - -// NormalizePackageType attempts to parse a and normalize string as content type. -// It returns the input untouched if name fails to be parsed. -// Example: -// -// "enUM" will be normalized (if possible) to "Enum" -func NormalizePackageType(name string) string { - res, err := ParsePackageType(name) - if err != nil { - return name - } - return res.String() -} diff --git a/internal/enums/pkg_status_enum.go b/internal/enums/pkg_status_enum.go deleted file mode 100644 index 5f79eaff2..000000000 --- a/internal/enums/pkg_status_enum.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by go-enum DO NOT EDIT. -// Version: 0.6.0 -// Revision: 919e61c0174b91303753ee3898569a01abb32c97 -// Build Date: 2023-12-18T15:54:43Z -// Built By: goreleaser - -package enums - -import ( - "fmt" - "strings" -) - -const ( - // PackageStatusNew is a PackageStatus of type New. - // Unused! - PackageStatusNew PackageStatus = iota - // PackageStatusInProgress is a PackageStatus of type In Progress. - // Undergoing work. 
- PackageStatusInProgress - // PackageStatusDone is a PackageStatus of type Done. - // Work has completed. - PackageStatusDone - // PackageStatusError is a PackageStatus of type Error. - // Processing failed. - PackageStatusError - // PackageStatusUnknown is a PackageStatus of type Unknown. - // Unused! - PackageStatusUnknown - // PackageStatusQueued is a PackageStatus of type Queued. - // Awaiting resource allocation. - PackageStatusQueued - // PackageStatusAbandoned is a PackageStatus of type Abandoned. - // User abandoned processing. - PackageStatusAbandoned - // PackageStatusPending is a PackageStatus of type Pending. - // Awaiting user decision. - PackageStatusPending -) - -var ErrInvalidPackageStatus = fmt.Errorf("not a valid PackageStatus, try [%s]", strings.Join(_PackageStatusNames, ", ")) - -const _PackageStatusName = "newin progressdoneerrorunknownqueuedabandonedpending" - -var _PackageStatusNames = []string{ - _PackageStatusName[0:3], - _PackageStatusName[3:14], - _PackageStatusName[14:18], - _PackageStatusName[18:23], - _PackageStatusName[23:30], - _PackageStatusName[30:36], - _PackageStatusName[36:45], - _PackageStatusName[45:52], -} - -// PackageStatusNames returns a list of possible string values of PackageStatus. -func PackageStatusNames() []string { - tmp := make([]string, len(_PackageStatusNames)) - copy(tmp, _PackageStatusNames) - return tmp -} - -var _PackageStatusMap = map[PackageStatus]string{ - PackageStatusNew: _PackageStatusName[0:3], - PackageStatusInProgress: _PackageStatusName[3:14], - PackageStatusDone: _PackageStatusName[14:18], - PackageStatusError: _PackageStatusName[18:23], - PackageStatusUnknown: _PackageStatusName[23:30], - PackageStatusQueued: _PackageStatusName[30:36], - PackageStatusAbandoned: _PackageStatusName[36:45], - PackageStatusPending: _PackageStatusName[45:52], -} - -// String implements the Stringer interface. -func (x PackageStatus) String() string { - if str, ok := _PackageStatusMap[x]; ok { - return str - } - return fmt.Sprintf("PackageStatus(%d)", x) -} - -// IsValid provides a quick way to determine if the typed value is -// part of the allowed enumerated values -func (x PackageStatus) IsValid() bool { - _, ok := _PackageStatusMap[x] - return ok -} - -var _PackageStatusValue = map[string]PackageStatus{ - _PackageStatusName[0:3]: PackageStatusNew, - _PackageStatusName[3:14]: PackageStatusInProgress, - _PackageStatusName[14:18]: PackageStatusDone, - _PackageStatusName[18:23]: PackageStatusError, - _PackageStatusName[23:30]: PackageStatusUnknown, - _PackageStatusName[30:36]: PackageStatusQueued, - _PackageStatusName[36:45]: PackageStatusAbandoned, - _PackageStatusName[45:52]: PackageStatusPending, -} - -// ParsePackageStatus attempts to convert a string to a PackageStatus. -func ParsePackageStatus(name string) (PackageStatus, error) { - if x, ok := _PackageStatusValue[name]; ok { - return x, nil - } - return PackageStatus(0), fmt.Errorf("%s is %w", name, ErrInvalidPackageStatus) -} - -// Values implements the entgo.io/ent/schema/field EnumValues interface. -func (x PackageStatus) Values() []string { - return PackageStatusNames() -} - -// PackageStatusInterfaces returns an interface list of possible values of PackageStatus. -func PackageStatusInterfaces() []interface{} { - var tmp []interface{} - for _, v := range _PackageStatusNames { - tmp = append(tmp, v) - } - return tmp -} - -// ParsePackageStatusWithDefault attempts to convert a string to a ContentType. -// It returns the default value if name is empty. 
-func ParsePackageStatusWithDefault(name string) (PackageStatus, error) { - if name == "" { - return _PackageStatusValue[_PackageStatusNames[0]], nil - } - if x, ok := _PackageStatusValue[name]; ok { - return x, nil - } - var e PackageStatus - return e, fmt.Errorf("%s is not a valid PackageStatus, try [%s]", name, strings.Join(_PackageStatusNames, ", ")) -} - -// NormalizePackageStatus attempts to parse a and normalize string as content type. -// It returns the input untouched if name fails to be parsed. -// Example: -// -// "enUM" will be normalized (if possible) to "Enum" -func NormalizePackageStatus(name string) string { - res, err := ParsePackageStatus(name) - if err != nil { - return name - } - return res.String() -} diff --git a/internal/enums/pkg_status.go b/internal/enums/sip_status.go similarity index 92% rename from internal/enums/pkg_status.go rename to internal/enums/sip_status.go index 5cb2e8403..1ecf41b93 100644 --- a/internal/enums/pkg_status.go +++ b/internal/enums/sip_status.go @@ -12,4 +12,4 @@ abandoned // User abandoned processing. pending // Awaiting user decision. ) */ -type PackageStatus uint +type SIPStatus uint diff --git a/internal/enums/sip_status_enum.go b/internal/enums/sip_status_enum.go new file mode 100644 index 000000000..66fc27a1d --- /dev/null +++ b/internal/enums/sip_status_enum.go @@ -0,0 +1,146 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: 0.6.0 +// Revision: 919e61c0174b91303753ee3898569a01abb32c97 +// Build Date: 2023-12-18T15:54:43Z +// Built By: goreleaser + +package enums + +import ( + "fmt" + "strings" +) + +const ( + // SIPStatusNew is a SIPStatus of type New. + // Unused! + SIPStatusNew SIPStatus = iota + // SIPStatusInProgress is a SIPStatus of type In Progress. + // Undergoing work. + SIPStatusInProgress + // SIPStatusDone is a SIPStatus of type Done. + // Work has completed. + SIPStatusDone + // SIPStatusError is a SIPStatus of type Error. + // Processing failed. + SIPStatusError + // SIPStatusUnknown is a SIPStatus of type Unknown. + // Unused! + SIPStatusUnknown + // SIPStatusQueued is a SIPStatus of type Queued. + // Awaiting resource allocation. + SIPStatusQueued + // SIPStatusAbandoned is a SIPStatus of type Abandoned. + // User abandoned processing. + SIPStatusAbandoned + // SIPStatusPending is a SIPStatus of type Pending. + // Awaiting user decision. + SIPStatusPending +) + +var ErrInvalidSIPStatus = fmt.Errorf("not a valid SIPStatus, try [%s]", strings.Join(_SIPStatusNames, ", ")) + +const _SIPStatusName = "newin progressdoneerrorunknownqueuedabandonedpending" + +var _SIPStatusNames = []string{ + _SIPStatusName[0:3], + _SIPStatusName[3:14], + _SIPStatusName[14:18], + _SIPStatusName[18:23], + _SIPStatusName[23:30], + _SIPStatusName[30:36], + _SIPStatusName[36:45], + _SIPStatusName[45:52], +} + +// SIPStatusNames returns a list of possible string values of SIPStatus. +func SIPStatusNames() []string { + tmp := make([]string, len(_SIPStatusNames)) + copy(tmp, _SIPStatusNames) + return tmp +} + +var _SIPStatusMap = map[SIPStatus]string{ + SIPStatusNew: _SIPStatusName[0:3], + SIPStatusInProgress: _SIPStatusName[3:14], + SIPStatusDone: _SIPStatusName[14:18], + SIPStatusError: _SIPStatusName[18:23], + SIPStatusUnknown: _SIPStatusName[23:30], + SIPStatusQueued: _SIPStatusName[30:36], + SIPStatusAbandoned: _SIPStatusName[36:45], + SIPStatusPending: _SIPStatusName[45:52], +} + +// String implements the Stringer interface. 
+func (x SIPStatus) String() string { + if str, ok := _SIPStatusMap[x]; ok { + return str + } + return fmt.Sprintf("SIPStatus(%d)", x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x SIPStatus) IsValid() bool { + _, ok := _SIPStatusMap[x] + return ok +} + +var _SIPStatusValue = map[string]SIPStatus{ + _SIPStatusName[0:3]: SIPStatusNew, + _SIPStatusName[3:14]: SIPStatusInProgress, + _SIPStatusName[14:18]: SIPStatusDone, + _SIPStatusName[18:23]: SIPStatusError, + _SIPStatusName[23:30]: SIPStatusUnknown, + _SIPStatusName[30:36]: SIPStatusQueued, + _SIPStatusName[36:45]: SIPStatusAbandoned, + _SIPStatusName[45:52]: SIPStatusPending, +} + +// ParseSIPStatus attempts to convert a string to a SIPStatus. +func ParseSIPStatus(name string) (SIPStatus, error) { + if x, ok := _SIPStatusValue[name]; ok { + return x, nil + } + return SIPStatus(0), fmt.Errorf("%s is %w", name, ErrInvalidSIPStatus) +} + +// Values implements the entgo.io/ent/schema/field EnumValues interface. +func (x SIPStatus) Values() []string { + return SIPStatusNames() +} + +// SIPStatusInterfaces returns an interface list of possible values of SIPStatus. +func SIPStatusInterfaces() []interface{} { + var tmp []interface{} + for _, v := range _SIPStatusNames { + tmp = append(tmp, v) + } + return tmp +} + +// ParseSIPStatusWithDefault attempts to convert a string to a ContentType. +// It returns the default value if name is empty. +func ParseSIPStatusWithDefault(name string) (SIPStatus, error) { + if name == "" { + return _SIPStatusValue[_SIPStatusNames[0]], nil + } + if x, ok := _SIPStatusValue[name]; ok { + return x, nil + } + var e SIPStatus + return e, fmt.Errorf("%s is not a valid SIPStatus, try [%s]", name, strings.Join(_SIPStatusNames, ", ")) +} + +// NormalizeSIPStatus attempts to parse a and normalize string as content type. +// It returns the input untouched if name fails to be parsed. +// Example: +// +// "enUM" will be normalized (if possible) to "Enum" +func NormalizeSIPStatus(name string) string { + res, err := ParseSIPStatus(name) + if err != nil { + return name + } + return res.String() +} diff --git a/internal/enums/package_type.go b/internal/enums/sip_type.go similarity index 77% rename from internal/enums/package_type.go rename to internal/enums/sip_type.go index 71834e745..5f6b64b66 100644 --- a/internal/enums/package_type.go +++ b/internal/enums/sip_type.go @@ -7,4 +7,4 @@ BagIt Archivematica Standard Transfer ) */ -type PackageType uint +type SIPType uint diff --git a/internal/enums/sip_type_enum.go b/internal/enums/sip_type_enum.go new file mode 100644 index 000000000..b1c06514f --- /dev/null +++ b/internal/enums/sip_type_enum.go @@ -0,0 +1,113 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: 0.6.0 +// Revision: 919e61c0174b91303753ee3898569a01abb32c97 +// Build Date: 2023-12-18T15:54:43Z +// Built By: goreleaser + +package enums + +import ( + "fmt" + "strings" +) + +const ( + // SIPTypeUnknown is a SIPType of type Unknown. + SIPTypeUnknown SIPType = iota + // SIPTypeBagIt is a SIPType of type BagIt. + SIPTypeBagIt + // SIPTypeArchivematicaStandardTransfer is a SIPType of type Archivematica Standard Transfer. 
+ SIPTypeArchivematicaStandardTransfer +) + +var ErrInvalidSIPType = fmt.Errorf("not a valid SIPType, try [%s]", strings.Join(_SIPTypeNames, ", ")) + +const _SIPTypeName = "UnknownBagItArchivematica Standard Transfer" + +var _SIPTypeNames = []string{ + _SIPTypeName[0:7], + _SIPTypeName[7:12], + _SIPTypeName[12:43], +} + +// SIPTypeNames returns a list of possible string values of SIPType. +func SIPTypeNames() []string { + tmp := make([]string, len(_SIPTypeNames)) + copy(tmp, _SIPTypeNames) + return tmp +} + +var _SIPTypeMap = map[SIPType]string{ + SIPTypeUnknown: _SIPTypeName[0:7], + SIPTypeBagIt: _SIPTypeName[7:12], + SIPTypeArchivematicaStandardTransfer: _SIPTypeName[12:43], +} + +// String implements the Stringer interface. +func (x SIPType) String() string { + if str, ok := _SIPTypeMap[x]; ok { + return str + } + return fmt.Sprintf("SIPType(%d)", x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x SIPType) IsValid() bool { + _, ok := _SIPTypeMap[x] + return ok +} + +var _SIPTypeValue = map[string]SIPType{ + _SIPTypeName[0:7]: SIPTypeUnknown, + _SIPTypeName[7:12]: SIPTypeBagIt, + _SIPTypeName[12:43]: SIPTypeArchivematicaStandardTransfer, +} + +// ParseSIPType attempts to convert a string to a SIPType. +func ParseSIPType(name string) (SIPType, error) { + if x, ok := _SIPTypeValue[name]; ok { + return x, nil + } + return SIPType(0), fmt.Errorf("%s is %w", name, ErrInvalidSIPType) +} + +// Values implements the entgo.io/ent/schema/field EnumValues interface. +func (x SIPType) Values() []string { + return SIPTypeNames() +} + +// SIPTypeInterfaces returns an interface list of possible values of SIPType. +func SIPTypeInterfaces() []interface{} { + var tmp []interface{} + for _, v := range _SIPTypeNames { + tmp = append(tmp, v) + } + return tmp +} + +// ParseSIPTypeWithDefault attempts to convert a string to a ContentType. +// It returns the default value if name is empty. +func ParseSIPTypeWithDefault(name string) (SIPType, error) { + if name == "" { + return _SIPTypeValue[_SIPTypeNames[0]], nil + } + if x, ok := _SIPTypeValue[name]; ok { + return x, nil + } + var e SIPType + return e, fmt.Errorf("%s is not a valid SIPType, try [%s]", name, strings.Join(_SIPTypeNames, ", ")) +} + +// NormalizeSIPType attempts to parse a and normalize string as content type. +// It returns the input untouched if name fails to be parsed. +// Example: +// +// "enUM" will be normalized (if possible) to "Enum" +func NormalizeSIPType(name string) string { + res, err := ParseSIPType(name) + if err != nil { + return name + } + return res.String() +} diff --git a/internal/package_/convert.go b/internal/package_/convert.go index 349e7a447..4f0eec92c 100644 --- a/internal/package_/convert.go +++ b/internal/package_/convert.go @@ -16,15 +16,15 @@ import ( "github.com/artefactual-sdps/enduro/internal/timerange" ) -func packageToGoaPackageCreatedEvent(p *datatypes.Package) *goapackage.PackageCreatedEvent { +func sipToGoaPackageCreatedEvent(s *datatypes.SIP) *goapackage.PackageCreatedEvent { var id uint - if p.ID > 0 { - id = uint(p.ID) // #nosec G115 -- range validated. + if s.ID > 0 { + id = uint(s.ID) // #nosec G115 -- range validated. } return &goapackage.PackageCreatedEvent{ ID: id, - Item: p.Goa(), + Item: s.Goa(), } } @@ -41,8 +41,8 @@ func preservationActionToGoa(pa *datatypes.PreservationAction) *goapackage.Endur } var packageID uint - if pa.PackageID > 0 { - packageID = uint(pa.PackageID) // #nosec G115 -- range validated. 
+ if pa.SIPID > 0 { + packageID = uint(pa.SIPID) // #nosec G115 -- range validated. } return &goapackage.EnduroPackagePreservationAction{ @@ -84,7 +84,7 @@ func preservationTaskToGoa(pt *datatypes.PreservationTask) *goapackage.EnduroPac } } -func listPayloadToPackageFilter(payload *goapackage.ListPayload) (*persistence.PackageFilter, error) { +func listPayloadToSIPFilter(payload *goapackage.ListPayload) (*persistence.SIPFilter, error) { aipID, err := stringToUUIDPtr(payload.AipID) if err != nil { return nil, fmt.Errorf("aip_id: %v", err) @@ -95,9 +95,9 @@ func listPayloadToPackageFilter(payload *goapackage.ListPayload) (*persistence.P return nil, fmt.Errorf("location_id: %v", err) } - var status *enums.PackageStatus + var status *enums.SIPStatus if payload.Status != nil { - s, err := enums.ParsePackageStatus(*payload.Status) + s, err := enums.ParseSIPStatus(*payload.Status) if err != nil { return nil, fmt.Errorf("invalid status") } @@ -109,7 +109,7 @@ func listPayloadToPackageFilter(payload *goapackage.ListPayload) (*persistence.P return nil, err } - pf := persistence.PackageFilter{ + pf := persistence.SIPFilter{ AIPID: aipID, Name: payload.Name, LocationID: locID, diff --git a/internal/package_/fake/mock_package_.go b/internal/package_/fake/mock_package_.go index 276bd9ee2..4e3ae5879 100644 --- a/internal/package_/fake/mock_package_.go +++ b/internal/package_/fake/mock_package_.go @@ -121,7 +121,7 @@ func (c *MockServiceCompletePreservationTaskCall) DoAndReturn(f func(context.Con } // Create mocks base method. -func (m *MockService) Create(arg0 context.Context, arg1 *datatypes.Package) error { +func (m *MockService) Create(arg0 context.Context, arg1 *datatypes.SIP) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1) ret0, _ := ret[0].(error) @@ -147,13 +147,13 @@ func (c *MockServiceCreateCall) Return(arg0 error) *MockServiceCreateCall { } // Do rewrite *gomock.Call.Do -func (c *MockServiceCreateCall) Do(f func(context.Context, *datatypes.Package) error) *MockServiceCreateCall { +func (c *MockServiceCreateCall) Do(f func(context.Context, *datatypes.SIP) error) *MockServiceCreateCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceCreateCall) DoAndReturn(f func(context.Context, *datatypes.Package) error) *MockServiceCreateCall { +func (c *MockServiceCreateCall) DoAndReturn(f func(context.Context, *datatypes.SIP) error) *MockServiceCreateCall { c.Call = c.Call.DoAndReturn(f) return c } @@ -349,7 +349,7 @@ func (c *MockServiceSetPreservationActionStatusCall) DoAndReturn(f func(context. } // SetStatus mocks base method. 
-func (m *MockService) SetStatus(arg0 context.Context, arg1 int, arg2 enums.PackageStatus) error { +func (m *MockService) SetStatus(arg0 context.Context, arg1 int, arg2 enums.SIPStatus) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetStatus", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -375,13 +375,13 @@ func (c *MockServiceSetStatusCall) Return(arg0 error) *MockServiceSetStatusCall } // Do rewrite *gomock.Call.Do -func (c *MockServiceSetStatusCall) Do(f func(context.Context, int, enums.PackageStatus) error) *MockServiceSetStatusCall { +func (c *MockServiceSetStatusCall) Do(f func(context.Context, int, enums.SIPStatus) error) *MockServiceSetStatusCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceSetStatusCall) DoAndReturn(f func(context.Context, int, enums.PackageStatus) error) *MockServiceSetStatusCall { +func (c *MockServiceSetStatusCall) DoAndReturn(f func(context.Context, int, enums.SIPStatus) error) *MockServiceSetStatusCall { c.Call = c.Call.DoAndReturn(f) return c } @@ -463,7 +463,7 @@ func (c *MockServiceSetStatusPendingCall) DoAndReturn(f func(context.Context, in } // UpdateWorkflowStatus mocks base method. -func (m *MockService) UpdateWorkflowStatus(arg0 context.Context, arg1 int, arg2, arg3, arg4, arg5 string, arg6 enums.PackageStatus, arg7 time.Time) error { +func (m *MockService) UpdateWorkflowStatus(arg0 context.Context, arg1 int, arg2, arg3, arg4, arg5 string, arg6 enums.SIPStatus, arg7 time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateWorkflowStatus", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(error) @@ -489,13 +489,13 @@ func (c *MockServiceUpdateWorkflowStatusCall) Return(arg0 error) *MockServiceUpd } // Do rewrite *gomock.Call.Do -func (c *MockServiceUpdateWorkflowStatusCall) Do(f func(context.Context, int, string, string, string, string, enums.PackageStatus, time.Time) error) *MockServiceUpdateWorkflowStatusCall { +func (c *MockServiceUpdateWorkflowStatusCall) Do(f func(context.Context, int, string, string, string, string, enums.SIPStatus, time.Time) error) *MockServiceUpdateWorkflowStatusCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceUpdateWorkflowStatusCall) DoAndReturn(f func(context.Context, int, string, string, string, string, enums.PackageStatus, time.Time) error) *MockServiceUpdateWorkflowStatusCall { +func (c *MockServiceUpdateWorkflowStatusCall) DoAndReturn(f func(context.Context, int, string, string, string, string, enums.SIPStatus, time.Time) error) *MockServiceUpdateWorkflowStatusCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/internal/package_/goa.go b/internal/package_/goa.go index e2226e097..69b487717 100644 --- a/internal/package_/goa.go +++ b/internal/package_/goa.go @@ -135,12 +135,12 @@ func (w *goaWrapper) List(ctx context.Context, payload *goapackage.ListPayload) payload = &goapackage.ListPayload{} } - pf, err := listPayloadToPackageFilter(payload) + pf, err := listPayloadToSIPFilter(payload) if err != nil { return nil, err } - r, pg, err := w.perSvc.ListPackages(ctx, pf) + r, pg, err := w.perSvc.ListSIPs(ctx, pf) if err != nil { return nil, goapackage.MakeInternalError(err) } @@ -182,7 +182,7 @@ func (w *goaWrapper) PreservationActions( return nil, err } - query := "SELECT id, workflow_id, type, status, CONVERT_TZ(started_at, @@session.time_zone, '+00:00') AS started_at, CONVERT_TZ(completed_at, @@session.time_zone, '+00:00') AS completed_at FROM preservation_action 
WHERE package_id = ? ORDER BY started_at DESC" + query := "SELECT id, workflow_id, type, status, CONVERT_TZ(started_at, @@session.time_zone, '+00:00') AS started_at, CONVERT_TZ(completed_at, @@session.time_zone, '+00:00') AS completed_at FROM preservation_action WHERE sip_id = ? ORDER BY started_at DESC" args := []interface{}{goapkg.ID} rows, err := w.db.QueryxContext(ctx, query, args...) diff --git a/internal/package_/goa_test.go b/internal/package_/goa_test.go index 6cc8a6c17..7d63e06a4 100644 --- a/internal/package_/goa_test.go +++ b/internal/package_/goa_test.go @@ -124,7 +124,7 @@ func nullUUID(s string) uuid.NullUUID { } } -var testPackages = []*datatypes.Package{ +var testSIPs = []*datatypes.SIP{ { ID: 1, Name: "Test package 1", @@ -132,7 +132,7 @@ var testPackages = []*datatypes.Package{ RunID: "c5f7c35a-d5a6-4e00-b4da-b036ce5b40bc", AIPID: nullUUID("e2ace0da-8697-453d-9ea1-4c9b62309e54"), LocationID: nullUUID("146182ff-9923-4869-bca1-0bbc0f822025"), - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Date(2024, 9, 25, 9, 31, 10, 0, time.UTC), StartedAt: sql.NullTime{ Time: time.Date(2024, 9, 25, 9, 31, 11, 0, time.UTC), @@ -150,7 +150,7 @@ var testPackages = []*datatypes.Package{ RunID: "d1f172c6-4ec8-4488-8a09-eef422b024cc", AIPID: nullUUID("ffdb12f4-1735-4022-b746-a9bf4a32109b"), LocationID: nullUUID("659a93a0-2a6a-4931-a505-f07f71f5b010"), - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Date(2024, 10, 1, 17, 13, 26, 0, time.UTC), StartedAt: sql.NullTime{ Time: time.Date(2024, 10, 1, 17, 13, 27, 0, time.UTC), @@ -177,13 +177,13 @@ func TestList(t *testing.T) { { name: "Returns all packages", mockRecorder: func(mr *persistence_fake.MockServiceMockRecorder) { - mr.ListPackages( + mr.ListSIPs( mockutil.Context(), - &persistence.PackageFilter{ + &persistence.SIPFilter{ Sort: persistence.NewSort().AddCol("id", true), }, ).Return( - testPackages, + testSIPs, &persistence.Page{Limit: 20, Total: 2}, nil, ) @@ -234,9 +234,9 @@ func TestList(t *testing.T) { Offset: ref.New(1), }, mockRecorder: func(mr *persistence_fake.MockServiceMockRecorder) { - mr.ListPackages( + mr.ListSIPs( mockutil.Context(), - &persistence.PackageFilter{ + &persistence.SIPFilter{ Name: ref.New("Test package 1"), AIPID: ref.New(uuid.MustParse("e2ace0da-8697-453d-9ea1-4c9b62309e54")), LocationID: ref.New(uuid.MustParse("146182ff-9923-4869-bca1-0bbc0f822025")), @@ -244,7 +244,7 @@ func TestList(t *testing.T) { Start: time.Date(2024, 9, 25, 9, 30, 0, 0, time.UTC), End: time.Date(2024, 9, 25, 9, 40, 0, 0, time.UTC), }, - Status: ref.New(enums.PackageStatusDone), + Status: ref.New(enums.SIPStatusDone), Sort: persistence.NewSort().AddCol("id", true), Page: persistence.Page{ Limit: 10, @@ -252,7 +252,7 @@ func TestList(t *testing.T) { }, }, ).Return( - testPackages[0:1], + testSIPs[0:1], &persistence.Page{Limit: 10, Total: 1}, nil, ) @@ -284,14 +284,14 @@ func TestList(t *testing.T) { Name: ref.New("Package 42"), }, mockRecorder: func(mr *persistence_fake.MockServiceMockRecorder) { - mr.ListPackages( + mr.ListSIPs( mockutil.Context(), - &persistence.PackageFilter{ + &persistence.SIPFilter{ Name: ref.New("Package 42"), Sort: persistence.NewSort().AddCol("id", true), }, ).Return( - []*datatypes.Package{}, + []*datatypes.SIP{}, &persistence.Page{}, persistence.ErrNotFound, ) diff --git a/internal/package_/package_.go b/internal/package_/package_.go index 932ba58d0..38ddb1918 100644 --- a/internal/package_/package_.go +++ 
b/internal/package_/package_.go @@ -26,15 +26,15 @@ var ErrInvalid = errors.New("invalid") type Service interface { // Goa returns an implementation of the goapackage Service. Goa() goapackage.Service - Create(context.Context, *datatypes.Package) error + Create(context.Context, *datatypes.SIP) error UpdateWorkflowStatus( ctx context.Context, ID int, name, workflowID, runID, aipID string, - status enums.PackageStatus, + status enums.SIPStatus, storedAt time.Time, ) error - SetStatus(ctx context.Context, ID int, status enums.PackageStatus) error + SetStatus(ctx context.Context, ID int, status enums.SIPStatus) error SetStatusInProgress(ctx context.Context, ID int, startedAt time.Time) error SetStatusPending(ctx context.Context, ID int) error SetLocationID(ctx context.Context, ID int, locationID uuid.UUID) error @@ -105,13 +105,13 @@ func (svc *packageImpl) Goa() goapackage.Service { // Create persists pkg to the data store then updates it from the data store, // adding generated data (e.g. ID, CreatedAt). -func (svc *packageImpl) Create(ctx context.Context, pkg *datatypes.Package) error { - err := svc.perSvc.CreatePackage(ctx, pkg) +func (svc *packageImpl) Create(ctx context.Context, pkg *datatypes.SIP) error { + err := svc.perSvc.CreateSIP(ctx, pkg) if err != nil { return fmt.Errorf("package: create: %v", err) } - event.PublishEvent(ctx, svc.evsvc, packageToGoaPackageCreatedEvent(pkg)) + event.PublishEvent(ctx, svc.evsvc, sipToGoaPackageCreatedEvent(pkg)) return nil } @@ -120,12 +120,12 @@ func (svc *packageImpl) UpdateWorkflowStatus( ctx context.Context, ID int, name, workflowID, runID, aipID string, - status enums.PackageStatus, + status enums.SIPStatus, storedAt time.Time, ) error { // Ensure that storedAt is reset during retries. completedAt := &storedAt - if status == enums.PackageStatusInProgress { + if status == enums.SIPStatusInProgress { completedAt = nil } if completedAt != nil && completedAt.IsZero() { @@ -137,7 +137,7 @@ func (svc *packageImpl) UpdateWorkflowStatus( } id := uint(ID) // #nosec G115 -- range validated. - query := `UPDATE package SET name = ?, workflow_id = ?, run_id = ?, aip_id = ?, status = ?, completed_at = ? WHERE id = ?` + query := `UPDATE sip SET name = ?, workflow_id = ?, run_id = ?, aip_id = ?, status = ?, completed_at = ? WHERE id = ?` args := []interface{}{ name, workflowID, @@ -160,13 +160,13 @@ func (svc *packageImpl) UpdateWorkflowStatus( return nil } -func (svc *packageImpl) SetStatus(ctx context.Context, ID int, status enums.PackageStatus) error { +func (svc *packageImpl) SetStatus(ctx context.Context, ID int, status enums.SIPStatus) error { if ID < 0 { return fmt.Errorf("%w: ID", ErrInvalid) } id := uint(ID) // #nosec G115 -- range validated. - query := `UPDATE package SET status = ? WHERE id = ?` + query := `UPDATE sip SET status = ? WHERE id = ?` args := []interface{}{ status, ID, @@ -184,7 +184,7 @@ func (svc *packageImpl) SetStatus(ctx context.Context, ID int, status enums.Pack func (svc *packageImpl) SetStatusInProgress(ctx context.Context, ID int, startedAt time.Time) error { var query string - args := []interface{}{enums.PackageStatusInProgress} + args := []interface{}{enums.SIPStatusInProgress} if ID < 0 { return fmt.Errorf("%w: ID", ErrInvalid) @@ -192,10 +192,10 @@ func (svc *packageImpl) SetStatusInProgress(ctx context.Context, ID int, started id := uint(ID) // #nosec G115 -- range validated. if !startedAt.IsZero() { - query = `UPDATE package SET status = ?, started_at = ? WHERE id = ?` + query = `UPDATE sip SET status = ?, started_at = ? 
WHERE id = ?` args = append(args, startedAt, ID) } else { - query = `UPDATE package SET status = ? WHERE id = ?` + query = `UPDATE sip SET status = ? WHERE id = ?` args = append(args, ID) } @@ -205,16 +205,16 @@ func (svc *packageImpl) SetStatusInProgress(ctx context.Context, ID int, started event.PublishEvent(ctx, svc.evsvc, &goapackage.PackageStatusUpdatedEvent{ ID: id, - Status: enums.PackageStatusInProgress.String(), + Status: enums.SIPStatusInProgress.String(), }) return nil } func (svc *packageImpl) SetStatusPending(ctx context.Context, ID int) error { - query := `UPDATE package SET status = ?, WHERE id = ?` + query := `UPDATE sip SET status = ?, WHERE id = ?` args := []interface{}{ - enums.PackageStatusPending, + enums.SIPStatusPending, ID, } @@ -229,7 +229,7 @@ func (svc *packageImpl) SetStatusPending(ctx context.Context, ID int) error { event.PublishEvent(ctx, svc.evsvc, &goapackage.PackageStatusUpdatedEvent{ ID: id, - Status: enums.PackageStatusPending.String(), + Status: enums.SIPStatusPending.String(), }) return nil @@ -241,7 +241,7 @@ func (svc *packageImpl) SetLocationID(ctx context.Context, ID int, locationID uu } id := uint(ID) // #nosec G115 -- range validated. - query := `UPDATE package SET location_id = ? WHERE id = ?` + query := `UPDATE sip SET location_id = ? WHERE id = ?` args := []interface{}{ locationID, ID, @@ -268,10 +268,10 @@ func (svc *packageImpl) updateRow(ctx context.Context, query string, args []inte return nil } -func (svc *packageImpl) read(ctx context.Context, ID uint) (*datatypes.Package, error) { - query := "SELECT id, name, workflow_id, run_id, aip_id, location_id, status, CONVERT_TZ(created_at, @@session.time_zone, '+00:00') AS created_at, CONVERT_TZ(started_at, @@session.time_zone, '+00:00') AS started_at, CONVERT_TZ(completed_at, @@session.time_zone, '+00:00') AS completed_at FROM package WHERE id = ?" +func (svc *packageImpl) read(ctx context.Context, ID uint) (*datatypes.SIP, error) { + query := "SELECT id, name, workflow_id, run_id, aip_id, location_id, status, CONVERT_TZ(created_at, @@session.time_zone, '+00:00') AS created_at, CONVERT_TZ(started_at, @@session.time_zone, '+00:00') AS started_at, CONVERT_TZ(completed_at, @@session.time_zone, '+00:00') AS completed_at FROM sip WHERE id = ?" args := []interface{}{ID} - c := datatypes.Package{} + c := datatypes.SIP{} if err := svc.db.GetContext(ctx, &c, query, args...); err != nil { return nil, err diff --git a/internal/package_/package__test.go b/internal/package_/package__test.go index 81d346a4c..1f8414c59 100644 --- a/internal/package_/package__test.go +++ b/internal/package_/package__test.go @@ -47,26 +47,26 @@ func TestCreatePackage(t *testing.T) { type test struct { name string - pkg datatypes.Package - mock func(*persistence_fake.MockService, datatypes.Package) *persistence_fake.MockService + sip datatypes.SIP + mock func(*persistence_fake.MockService, datatypes.SIP) *persistence_fake.MockService wantErr string } for _, tt := range []test{ { name: "creates a package", - pkg: datatypes.Package{ + sip: datatypes.SIP{ Name: "test", WorkflowID: "4258090a-e27b-4fd9-a76b-28deb3d16813", RunID: "8f3a5756-6bc5-4d82-846d-59442dd6ad8f", - Status: enums.PackageStatusQueued, + Status: enums.SIPStatusQueued, }, - mock: func(svc *persistence_fake.MockService, p datatypes.Package) *persistence_fake.MockService { + mock: func(svc *persistence_fake.MockService, s datatypes.SIP) *persistence_fake.MockService { svc.EXPECT(). - CreatePackage(mockutil.Context(), &p). + CreateSIP(mockutil.Context(), &s). 
DoAndReturn( - func(ctx context.Context, p *datatypes.Package) error { - p.ID = 1 - p.CreatedAt = time.Date(2024, 3, 14, 15, 57, 25, 0, time.UTC) + func(ctx context.Context, s *datatypes.SIP) error { + s.ID = 1 + s.CreatedAt = time.Date(2024, 3, 14, 15, 57, 25, 0, time.UTC) return nil }, ) @@ -75,16 +75,16 @@ func TestCreatePackage(t *testing.T) { }, { name: "errors creating a package with a missing RunID", - pkg: datatypes.Package{ + sip: datatypes.SIP{ Name: "test", WorkflowID: "4258090a-e27b-4fd9-a76b-28deb3d16813", - Status: enums.PackageStatusQueued, + Status: enums.SIPStatusQueued, }, - mock: func(svc *persistence_fake.MockService, p datatypes.Package) *persistence_fake.MockService { + mock: func(svc *persistence_fake.MockService, s datatypes.SIP) *persistence_fake.MockService { svc.EXPECT(). - CreatePackage(mockutil.Context(), &p). + CreateSIP(mockutil.Context(), &s). DoAndReturn( - func(ctx context.Context, p *datatypes.Package) error { + func(ctx context.Context, s *datatypes.SIP) error { return fmt.Errorf("invalid data error: field \"RunID\" is required") }, ) @@ -98,11 +98,11 @@ func TestCreatePackage(t *testing.T) { pkgSvc, perSvc := testSvc(t, nil, 0) if tt.mock != nil { - tt.mock(perSvc, tt.pkg) + tt.mock(perSvc, tt.sip) } - pkg := tt.pkg - err := pkgSvc.Create(context.Background(), &pkg) + sip := tt.sip + err := pkgSvc.Create(context.Background(), &sip) if tt.wantErr != "" { assert.Error(t, err, tt.wantErr) diff --git a/internal/package_/preservation_action.go b/internal/package_/preservation_action.go index c39717b0e..1afc4806b 100644 --- a/internal/package_/preservation_action.go +++ b/internal/package_/preservation_action.go @@ -97,9 +97,9 @@ func (svc *packageImpl) readPreservationAction( preservation_action.status, CONVERT_TZ(preservation_action.started_at, @@session.time_zone, '+00:00') AS started_at, CONVERT_TZ(preservation_action.completed_at, @@session.time_zone, '+00:00') AS completed_at, - preservation_action.package_id + preservation_action.sip_id FROM preservation_action - LEFT JOIN package ON (preservation_action.package_id = package.id) + LEFT JOIN sip ON (preservation_action.sip_id = sip.id) WHERE preservation_action.id = ? ` diff --git a/internal/package_/preservation_action_test.go b/internal/package_/preservation_action_test.go index b37495439..a726f3932 100644 --- a/internal/package_/preservation_action_test.go +++ b/internal/package_/preservation_action_test.go @@ -40,7 +40,7 @@ func TestCreatePreservationAction(t *testing.T) { name: "Creates a preservation action", pa: datatypes.PreservationAction{ WorkflowID: workflowID, - PackageID: 1, + SIPID: 1, }, want: datatypes.PreservationAction{ ID: 11, @@ -51,7 +51,7 @@ func TestCreatePreservationAction(t *testing.T) { Time: time.Date(2024, 6, 3, 9, 4, 23, 0, time.UTC), Valid: true, }, - PackageID: 1, + SIPID: 1, }, mock: func(svc *persistence_fake.MockService, pa datatypes.PreservationAction) *persistence_fake.MockService { svc.EXPECT(). @@ -77,7 +77,7 @@ func TestCreatePreservationAction(t *testing.T) { Status: enums.PreservationActionStatusDone, StartedAt: startedAt, CompletedAt: completedAt, - PackageID: 1, + SIPID: 1, }, want: datatypes.PreservationAction{ ID: 11, @@ -86,7 +86,7 @@ func TestCreatePreservationAction(t *testing.T) { Status: enums.PreservationActionStatusDone, StartedAt: startedAt, CompletedAt: completedAt, - PackageID: 1, + SIPID: 1, }, mock: func(svc *persistence_fake.MockService, pa datatypes.PreservationAction) *persistence_fake.MockService { svc.EXPECT(). 
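[Illustrative sketch, not part of the patch] After the rename, test code drives the persistence layer through CreateSIP rather than CreatePackage. A minimal example of the expectation pattern used in the hunks above, assuming the gomock-generated constructor persistence_fake.NewMockService and the same imports as package__test.go:

    ctrl := gomock.NewController(t)
    perSvc := persistence_fake.NewMockService(ctrl)

    sip := datatypes.SIP{
        Name:       "test",
        WorkflowID: "4258090a-e27b-4fd9-a76b-28deb3d16813",
        RunID:      "8f3a5756-6bc5-4d82-846d-59442dd6ad8f",
        Status:     enums.SIPStatusQueued,
    }

    // Expect the renamed CreateSIP call; the fake fills in values the real
    // store would generate (ID, CreatedAt).
    perSvc.EXPECT().
        CreateSIP(mockutil.Context(), &sip).
        DoAndReturn(func(ctx context.Context, s *datatypes.SIP) error {
            s.ID = 1
            s.CreatedAt = time.Date(2024, 3, 14, 15, 57, 25, 0, time.UTC)
            return nil
        })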
@@ -103,7 +103,7 @@ func TestCreatePreservationAction(t *testing.T) { { name: "Errors when WorkflowID is missing", pa: datatypes.PreservationAction{ - PackageID: 1, + SIPID: 1, }, wantErr: "preservation action: create: invalid data error: field \"WorkflowID\" is required", mock: func(svc *persistence_fake.MockService, pa datatypes.PreservationAction) *persistence_fake.MockService { diff --git a/internal/persistence/ent/client/client_test.go b/internal/persistence/ent/client/client_test.go index 5ed778d2f..4e9246a23 100644 --- a/internal/persistence/ent/client/client_test.go +++ b/internal/persistence/ent/client/client_test.go @@ -29,15 +29,15 @@ func setUpClient(t *testing.T, logger logr.Logger) (*db.Client, persistence.Serv return entc, c } -func createPackage( +func createSIP( entc *db.Client, name string, - status enums.PackageStatus, -) (*db.Pkg, error) { + status enums.SIPStatus, +) (*db.SIP, error) { runID := uuid.MustParse("aee9644d-6397-4b34-92f7-442ad3dd3b13") aipID := uuid.MustParse("30223842-0650-4f79-80bd-7bf43b810656") - return entc.Pkg.Create(). + return entc.SIP.Create(). SetName(name). SetWorkflowID("12345"). SetRunID(runID). @@ -48,14 +48,14 @@ func createPackage( func createPreservationAction( entc *db.Client, - pkgID int, + sipID int, status enums.PreservationActionStatus, ) (*db.PreservationAction, error) { return entc.PreservationAction.Create(). SetWorkflowID("12345"). SetType(int8(enums.PreservationActionTypeCreateAip)). SetStatus(int8(status)). // #nosec G115 -- constrained value. - SetPackageID(pkgID). + SetSipID(sipID). Save(context.Background()) } @@ -66,17 +66,17 @@ func TestNew(t *testing.T) { t.Parallel() entc, _ := setUpClient(t, logr.Discard()) - p, err := createPackage( + s, err := createSIP( entc, "testing 1-2-3", - enums.PackageStatusInProgress, + enums.SIPStatusInProgress, ) assert.NilError(t, err) - assert.Equal(t, p.Name, "testing 1-2-3") - assert.Equal(t, p.WorkflowID, "12345") - assert.Equal(t, p.RunID, uuid.MustParse("aee9644d-6397-4b34-92f7-442ad3dd3b13")) - assert.Equal(t, p.AipID, uuid.MustParse("30223842-0650-4f79-80bd-7bf43b810656")) - assert.Equal(t, p.Status, int8(enums.PackageStatusInProgress)) + assert.Equal(t, s.Name, "testing 1-2-3") + assert.Equal(t, s.WorkflowID, "12345") + assert.Equal(t, s.RunID, uuid.MustParse("aee9644d-6397-4b34-92f7-442ad3dd3b13")) + assert.Equal(t, s.AipID, uuid.MustParse("30223842-0650-4f79-80bd-7bf43b810656")) + assert.Equal(t, s.Status, int8(enums.SIPStatusInProgress)) }) } diff --git a/internal/persistence/ent/client/convert.go b/internal/persistence/ent/client/convert.go index c23fa1104..c31ada020 100644 --- a/internal/persistence/ent/client/convert.go +++ b/internal/persistence/ent/client/convert.go @@ -10,41 +10,41 @@ import ( "github.com/artefactual-sdps/enduro/internal/persistence/ent/db" ) -// convertPkgToPackage converts an entgo `db.Pkg` package representation to a -// `datatypes.Package` representation. -func convertPkgToPackage(pkg *db.Pkg) *datatypes.Package { +// convertSIP converts an entgo `db.SIP` representation to a +// `datatypes.SIP` representation. 
+func convertSIP(sip *db.SIP) *datatypes.SIP { var started, completed sql.NullTime - if !pkg.StartedAt.IsZero() { - started = sql.NullTime{Time: pkg.StartedAt, Valid: true} + if !sip.StartedAt.IsZero() { + started = sql.NullTime{Time: sip.StartedAt, Valid: true} } - if !pkg.CompletedAt.IsZero() { - completed = sql.NullTime{Time: pkg.CompletedAt, Valid: true} + if !sip.CompletedAt.IsZero() { + completed = sql.NullTime{Time: sip.CompletedAt, Valid: true} } var aipID uuid.NullUUID - if pkg.AipID != uuid.Nil { - aipID = uuid.NullUUID{UUID: pkg.AipID, Valid: true} + if sip.AipID != uuid.Nil { + aipID = uuid.NullUUID{UUID: sip.AipID, Valid: true} } var locID uuid.NullUUID - if pkg.LocationID != uuid.Nil { - locID = uuid.NullUUID{UUID: pkg.LocationID, Valid: true} + if sip.LocationID != uuid.Nil { + locID = uuid.NullUUID{UUID: sip.LocationID, Valid: true} } var status uint - if pkg.Status > 0 { - status = uint(pkg.Status) // #nosec G115 -- range validated. + if sip.Status > 0 { + status = uint(sip.Status) // #nosec G115 -- range validated. } - return &datatypes.Package{ - ID: pkg.ID, - Name: pkg.Name, - WorkflowID: pkg.WorkflowID, - RunID: pkg.RunID.String(), + return &datatypes.SIP{ + ID: sip.ID, + Name: sip.Name, + WorkflowID: sip.WorkflowID, + RunID: sip.RunID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatus(status), - CreatedAt: pkg.CreatedAt, + Status: enums.SIPStatus(status), + CreatedAt: sip.CreatedAt, StartedAt: started, CompletedAt: completed, } @@ -70,7 +70,7 @@ func convertPreservationAction(pa *db.PreservationAction) *datatypes.Preservatio Status: enums.PreservationActionStatus(pa.Status), // #nosec G115 -- constrained value. StartedAt: started, CompletedAt: completed, - PackageID: pa.PackageID, + SIPID: pa.SipID, } } diff --git a/internal/persistence/ent/client/filter.go b/internal/persistence/ent/client/filter.go index 5d04a550c..834b59d71 100644 --- a/internal/persistence/ent/client/filter.go +++ b/internal/persistence/ent/client/filter.go @@ -175,8 +175,8 @@ func (f *Filter[Q, O, P]) addFilter(column string, selector func(s *sql.Selector // // Validating pointers is complicated because ptr has an interface{} type. The // conditional `ptr == nil` doesn't evaluate true when ptr is a typed nil like -// (*enums.PackageStatus)(nil). A type switch case on the validator interface -// can then assign the nil *enums.PackageStatus to the validator interface and +// (*enums.SIPStatus)(nil). A type switch case on the validator interface +// can then assign the nil *enums.SIPStatus to the validator interface and // calling `t.IsValid()` causes a panic from trying to call `IsValid()` on a // nil pointer. func validPtrValue(ptr any) bool { @@ -185,7 +185,7 @@ func validPtrValue(ptr any) bool { } switch t := ptr.(type) { - case *enums.PackageStatus: + case *enums.SIPStatus: return t != nil && t.IsValid() case *enums.PreprocessingTaskOutcome: return t != nil && t.IsValid() @@ -216,8 +216,8 @@ func (f *Filter[Q, O, P]) Contains(column string, value *string) { // validator interface, value is validated before the filter is added. func (f *Filter[Q, O, P]) Equals(column string, value any) { // The current code always calls this function with a pointer value (e.g. - // *string, *enums.PackageStatus). If we need to pass value types (e.g. - // (string, enums.PackageStatus) in the future we'll have to combine the + // *string, *enums.SIPStatus). If we need to pass value types (e.g. 
+ // (string, enums.SIPStatus) in the future we'll have to combine the // validPtrValue() & validValue() type switch cases. if !validPtrValue(value) { return diff --git a/internal/persistence/ent/client/filter_test.go b/internal/persistence/ent/client/filter_test.go index f3df63e73..f225a33f3 100644 --- a/internal/persistence/ent/client/filter_test.go +++ b/internal/persistence/ent/client/filter_test.go @@ -20,7 +20,7 @@ type pred func(*sql.Selector) type orderOpt func(*sql.Selector) -// query is a querier like *db.PkgQuery for testing. +// query is a querier like *db.SIPQuery for testing. type query struct { table string limit int @@ -391,19 +391,19 @@ func TestFilter(t *testing.T) { f.Equals("outcome", &taskOutcome) // Add an integer enum filter. - pkgStatus := enums.PackageStatusDone - f.Equals("status", &pkgStatus) + sipStatus := enums.SIPStatusDone + f.Equals("status", &sipStatus) // Omit invalid enum values. f.Equals("outcome2", ref.New(enums.PreprocessingTaskOutcome("invalid"))) // Omit nil enum pointers. - f.Equals("status2", (*enums.PackageStatus)(nil)) + f.Equals("status2", (*enums.SIPStatus)(nil)) _, whole := f.Apply() assert.Equal(t, whole.where, "`data`.`outcome` = ? AND `data`.`status` = ?") - assert.DeepEqual(t, whole.args, []any{&taskOutcome, &pkgStatus}) + assert.DeepEqual(t, whole.args, []any{&taskOutcome, &sipStatus}) }) t.Run("Filters on a list of strings", func(t *testing.T) { @@ -430,16 +430,16 @@ func TestFilter(t *testing.T) { newSortableFields("id"), ) f.In("status", []any{ - enums.PackageStatusInProgress, - enums.PackageStatusDone, - enums.PackageStatus(100), // Ignore an invalid enum. + enums.SIPStatusInProgress, + enums.SIPStatusDone, + enums.SIPStatus(100), // Ignore an invalid enum. }) _, whole := f.Apply() assert.Equal(t, whole.where, "`data`.`status` IN (?, ?)") assert.DeepEqual(t, whole.args, []any{ - enums.PackageStatusInProgress, - enums.PackageStatusDone, + enums.SIPStatusInProgress, + enums.SIPStatusDone, }) }) diff --git a/internal/persistence/ent/client/preservation_action.go b/internal/persistence/ent/client/preservation_action.go index 570c1b754..0d5435817 100644 --- a/internal/persistence/ent/client/preservation_action.go +++ b/internal/persistence/ent/client/preservation_action.go @@ -12,8 +12,8 @@ func (c *client) CreatePreservationAction(ctx context.Context, pa *datatypes.Pre if pa.WorkflowID == "" { return newRequiredFieldError("WorkflowID") } - if pa.PackageID == 0 { - return newRequiredFieldError("PackageID") + if pa.SIPID == 0 { + return newRequiredFieldError("SIPID") } // TODO: Validate Type & Status enums. @@ -35,7 +35,7 @@ func (c *client) CreatePreservationAction(ctx context.Context, pa *datatypes.Pre SetStatus(int8(pa.Status)). // #nosec G115 -- constrained value. SetNillableStartedAt(startedAt). SetNillableCompletedAt(completedAt). 
- SetPackageID(pa.PackageID) + SetSipID(pa.SIPID) r, err := q.Save(ctx) if err != nil { diff --git a/internal/persistence/ent/client/preservation_action_test.go b/internal/persistence/ent/client/preservation_action_test.go index ce0dc2fb7..94c0ca70a 100644 --- a/internal/persistence/ent/client/preservation_action_test.go +++ b/internal/persistence/ent/client/preservation_action_test.go @@ -21,8 +21,8 @@ func TestCreatePreservationAction(t *testing.T) { completed := sql.NullTime{Time: started.Time.Add(time.Second), Valid: true} type params struct { - pa *datatypes.PreservationAction - setPackageID bool + pa *datatypes.PreservationAction + setSIPID bool } tests := []struct { name string @@ -40,7 +40,7 @@ func TestCreatePreservationAction(t *testing.T) { StartedAt: started, CompletedAt: completed, }, - setPackageID: true, + setSIPID: true, }, want: &datatypes.PreservationAction{ ID: 1, @@ -49,7 +49,7 @@ func TestCreatePreservationAction(t *testing.T) { Status: enums.PreservationActionStatusDone, StartedAt: started, CompletedAt: completed, - PackageID: 1, + SIPID: 1, }, }, { @@ -63,7 +63,7 @@ func TestCreatePreservationAction(t *testing.T) { wantErr: "invalid data error: field \"WorkflowID\" is required", }, { - name: "Required field error for missing PackageID", + name: "Required field error for missing SIPID", args: params{ pa: &datatypes.PreservationAction{ WorkflowID: workflowID, @@ -71,16 +71,16 @@ func TestCreatePreservationAction(t *testing.T) { Status: enums.PreservationActionStatusDone, }, }, - wantErr: "invalid data error: field \"PackageID\" is required", + wantErr: "invalid data error: field \"SIPID\" is required", }, { - name: "Foreign key error on an invalid PackageID", + name: "Foreign key error on an invalid SIPID", args: params{ pa: &datatypes.PreservationAction{ WorkflowID: workflowID, Type: 9, Status: enums.PreservationActionStatusDone, - PackageID: 12345, + SIPID: 12345, }, }, wantErr: "invalid data error: db: constraint failed: FOREIGN KEY constraint failed: create preservation action", @@ -92,15 +92,15 @@ func TestCreatePreservationAction(t *testing.T) { entc, svc := setUpClient(t, logr.Discard()) ctx := context.Background() - pkg, _ := createPackage( + sip, _ := createSIP( entc, - "Test package", - enums.PackageStatusInProgress, + "Test SIP", + enums.SIPStatusInProgress, ) pa := *tt.args.pa // Make a local copy. 
- if tt.args.setPackageID { - pa.PackageID = pkg.ID + if tt.args.setSIPID { + pa.SIPID = sip.ID } err := svc.CreatePreservationAction(ctx, &pa) diff --git a/internal/persistence/ent/client/preservation_task_test.go b/internal/persistence/ent/client/preservation_task_test.go index 20d92138a..f098ca6fb 100644 --- a/internal/persistence/ent/client/preservation_task_test.go +++ b/internal/persistence/ent/client/preservation_task_test.go @@ -24,17 +24,17 @@ func addDBFixtures( ) (*db.PreservationAction, *db.PreservationAction) { t.Helper() - pkg, err := createPackage(entc, "P1", enums.PackageStatusInProgress) + sip, err := createSIP(entc, "S1", enums.SIPStatusInProgress) if err != nil { - t.Errorf("create package: %v", err) + t.Errorf("create SIP: %v", err) } - pa, err := createPreservationAction(entc, pkg.ID, enums.PreservationActionStatusInProgress) + pa, err := createPreservationAction(entc, sip.ID, enums.PreservationActionStatusInProgress) if err != nil { t.Errorf("create preservation action: %v", err) } - pa2, err := createPreservationAction(entc, pkg.ID, enums.PreservationActionStatusDone) + pa2, err := createPreservationAction(entc, sip.ID, enums.PreservationActionStatusDone) if err != nil { t.Errorf("create preservation action 2: %v", err) } @@ -118,14 +118,14 @@ func TestCreatePreservationTask(t *testing.T) { entc, svc := setUpClient(t, logr.Discard()) ctx := context.Background() - pkg, _ := createPackage( + sip, _ := createSIP( entc, - "Test package", - enums.PackageStatusDone, + "Test SIP", + enums.SIPStatusDone, ) pa, _ := createPreservationAction( entc, - pkg.ID, + sip.ID, enums.PreservationActionStatusDone, ) diff --git a/internal/persistence/ent/client/package.go b/internal/persistence/ent/client/sip.go similarity index 51% rename from internal/persistence/ent/client/package.go rename to internal/persistence/ent/client/sip.go index 4fdc897a0..5cc7743e5 100644 --- a/internal/persistence/ent/client/package.go +++ b/internal/persistence/ent/client/sip.go @@ -9,88 +9,88 @@ import ( "github.com/artefactual-sdps/enduro/internal/datatypes" "github.com/artefactual-sdps/enduro/internal/persistence" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) -// CreatePackage creates and persists a new package using the values from pkg -// then returns the updated package. +// CreateSIP creates and persists a new SIP using the values from s +// then returns the updated SIP. // -// The input pkg "ID" and "CreatedAt" values are ignored; the stored package +// The input SIP "ID" and "CreatedAt" values are ignored; the stored SIP // "ID" is generated by the persistence implementation and "CreatedAt" is always // set to the current time. -func (c *client) CreatePackage(ctx context.Context, pkg *datatypes.Package) error { +func (c *client) CreateSIP(ctx context.Context, s *datatypes.SIP) error { // Validate required fields. - if pkg.Name == "" { + if s.Name == "" { return newRequiredFieldError("Name") } - if pkg.WorkflowID == "" { + if s.WorkflowID == "" { return newRequiredFieldError("WorkflowID") } - if pkg.RunID == "" { + if s.RunID == "" { return newRequiredFieldError("RunID") } - runID, err := uuid.Parse(pkg.RunID) + runID, err := uuid.Parse(s.RunID) if err != nil { return newParseError(err, "RunID") } - q := c.ent.Pkg.Create(). - SetName(pkg.Name). - SetWorkflowID(pkg.WorkflowID). + q := c.ent.SIP.Create(). + SetName(s.Name). 
+ SetWorkflowID(s.WorkflowID). SetRunID(runID). - SetStatus(int8(pkg.Status)) // #nosec G115 -- constrained value. + SetStatus(int8(s.Status)) // #nosec G115 -- constrained value. // Add optional fields. - if pkg.AIPID.Valid { - q.SetAipID(pkg.AIPID.UUID) + if s.AIPID.Valid { + q.SetAipID(s.AIPID.UUID) } - if pkg.LocationID.Valid { - q.SetLocationID(pkg.LocationID.UUID) + if s.LocationID.Valid { + q.SetLocationID(s.LocationID.UUID) } - if pkg.StartedAt.Valid { - q.SetStartedAt(pkg.StartedAt.Time) + if s.StartedAt.Valid { + q.SetStartedAt(s.StartedAt.Time) } - if pkg.CompletedAt.Valid { - q.SetCompletedAt(pkg.CompletedAt.Time) + if s.CompletedAt.Valid { + q.SetCompletedAt(s.CompletedAt.Time) } // Set CreatedAt to the current time q.SetCreatedAt(time.Now()) - // Save the package. - p, err := q.Save(ctx) + // Save the SIP. + dbs, err := q.Save(ctx) if err != nil { - return newDBErrorWithDetails(err, "create package") + return newDBErrorWithDetails(err, "create SIP") } - // Update pkg with DB data, to get generated values (e.g. ID). - *pkg = *convertPkgToPackage(p) + // Update SIP with DB data, to get generated values (e.g. ID). + *s = *convertSIP(dbs) return nil } -// UpdatePackage updates the persisted package identified by id using the -// updater function, then returns the updated package. +// UpdateSIP updates the persisted SIP identified by id using the +// updater function, then returns the updated SIP. // -// The package "ID" and "CreatedAt" field values can not be updated with this +// The SIP "ID" and "CreatedAt" field values can not be updated with this // method. -func (c *client) UpdatePackage( +func (c *client) UpdateSIP( ctx context.Context, id int, - updater persistence.PackageUpdater, -) (*datatypes.Package, error) { + updater persistence.SIPUpdater, +) (*datatypes.SIP, error) { tx, err := c.ent.BeginTx(ctx, nil) if err != nil { return nil, newDBError(err) } - p, err := tx.Pkg.Get(ctx, id) + s, err := tx.SIP.Get(ctx, id) if err != nil { return nil, rollback(tx, newDBError(err)) } - up, err := updater(convertPkgToPackage(p)) + up, err := updater(convertSIP(s)) if err != nil { return nil, rollback(tx, newUpdaterError(err)) } @@ -101,7 +101,7 @@ func (c *client) UpdatePackage( } // Set required column values. - q := tx.Pkg.UpdateOneID(id). + q := tx.SIP.UpdateOneID(id). SetName(up.Name). SetWorkflowID(up.WorkflowID). SetRunID(runID). @@ -122,7 +122,7 @@ func (c *client) UpdatePackage( } // Save changes. - p, err = q.Save(ctx) + s, err = q.Save(ctx) if err != nil { return nil, rollback(tx, newDBError(err)) } @@ -130,20 +130,20 @@ func (c *client) UpdatePackage( return nil, rollback(tx, newDBError(err)) } - return convertPkgToPackage(p), nil + return convertSIP(s), nil } -// ListPackages returns a slice of packages filtered according to f. -func (c *client) ListPackages(ctx context.Context, f *persistence.PackageFilter) ( - []*datatypes.Package, *persistence.Page, error, +// ListSIPs returns a slice of SIPs filtered according to f. 
+func (c *client) ListSIPs(ctx context.Context, f *persistence.SIPFilter) ( + []*datatypes.SIP, *persistence.Page, error, ) { - res := []*datatypes.Package{} + res := []*datatypes.SIP{} if f == nil { - f = &persistence.PackageFilter{} + f = &persistence.SIPFilter{} } - page, whole := filterPackages(c.ent.Pkg.Query(), f) + page, whole := filterSIPs(c.ent.SIP.Query(), f) r, err := page.All(ctx) if err != nil { @@ -151,7 +151,7 @@ func (c *client) ListPackages(ctx context.Context, f *persistence.PackageFilter) } for _, i := range r { - res = append(res, convertPkgToPackage(i)) + res = append(res, convertSIP(i)) } total, err := whole.Count(ctx) @@ -168,20 +168,20 @@ func (c *client) ListPackages(ctx context.Context, f *persistence.PackageFilter) return res, pp, err } -// filterPackages applies the package filter f to the query q. -func filterPackages(q *db.PkgQuery, f *persistence.PackageFilter) (page, whole *db.PkgQuery) { +// filterSIPs applies the SIP filter f to the query q. +func filterSIPs(q *db.SIPQuery, f *persistence.SIPFilter) (page, whole *db.SIPQuery) { qf := NewFilter(q, SortableFields{ - pkg.FieldID: {Name: "ID", Default: true}, + sip.FieldID: {Name: "ID", Default: true}, }) - qf.Contains(pkg.FieldName, f.Name) - qf.Equals(pkg.FieldAipID, f.AIPID) - qf.Equals(pkg.FieldLocationID, f.LocationID) - qf.Equals(pkg.FieldStatus, f.Status) - qf.AddDateRange(pkg.FieldCreatedAt, f.CreatedAt) + qf.Contains(sip.FieldName, f.Name) + qf.Equals(sip.FieldAipID, f.AIPID) + qf.Equals(sip.FieldLocationID, f.LocationID) + qf.Equals(sip.FieldStatus, f.Status) + qf.AddDateRange(sip.FieldCreatedAt, f.CreatedAt) qf.OrderBy(f.Sort) qf.Page(f.Limit, f.Offset) - // Update the PackageFilter values with the actual values set on the query. + // Update the SIPFilter values with the actual values set on the query. // E.g. calling `h.Page(0,0)` will set the query limit equal to the default // page size. 
f.Limit = qf.limit diff --git a/internal/persistence/ent/client/package_test.go b/internal/persistence/ent/client/sip_test.go similarity index 69% rename from internal/persistence/ent/client/package_test.go rename to internal/persistence/ent/client/sip_test.go index d497b726c..47f878c4f 100644 --- a/internal/persistence/ent/client/package_test.go +++ b/internal/persistence/ent/client/sip_test.go @@ -21,7 +21,7 @@ import ( "github.com/artefactual-sdps/enduro/internal/timerange" ) -func TestCreatePackage(t *testing.T) { +func TestCreateSIP(t *testing.T) { t.Parallel() runID := uuid.New() @@ -31,71 +31,71 @@ func TestCreatePackage(t *testing.T) { completed := sql.NullTime{Time: started.Time.Add(time.Second), Valid: true} type params struct { - pkg *datatypes.Package + sip *datatypes.SIP } tests := []struct { name string args params - want *datatypes.Package + want *datatypes.SIP wantErr string }{ { - name: "Saves a new package in the DB", + name: "Saves a new SIP in the DB", args: params{ - pkg: &datatypes.Package{ - Name: "Test package 1", + sip: &datatypes.SIP{ + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started, CompletedAt: completed, }, }, - want: &datatypes.Package{ + want: &datatypes.SIP{ ID: 1, - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started, CompletedAt: completed, }, }, { - name: "Saves a package with missing optional fields", + name: "Saves a SIP with missing optional fields", args: params{ - pkg: &datatypes.Package{ - Name: "Test package 2", + sip: &datatypes.SIP{ + Name: "Test SIP 2", WorkflowID: "workflow-2", RunID: runID.String(), - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, }, }, - want: &datatypes.Package{ + want: &datatypes.SIP{ ID: 1, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-2", RunID: runID.String(), - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), }, }, { name: "Required field error for missing Name", args: params{ - pkg: &datatypes.Package{}, + sip: &datatypes.SIP{}, }, wantErr: "invalid data error: field \"Name\" is required", }, { name: "Required field error for missing WorkflowID", args: params{ - pkg: &datatypes.Package{ + sip: &datatypes.SIP{ Name: "Missing WorkflowID", }, }, @@ -104,7 +104,7 @@ func TestCreatePackage(t *testing.T) { { name: "Required field error for missing RunID", args: params{ - pkg: &datatypes.Package{ + sip: &datatypes.SIP{ Name: "Missing RunID", WorkflowID: "workflow-12345", }, @@ -114,8 +114,8 @@ func TestCreatePackage(t *testing.T) { { name: "Errors on invalid RunID", args: params{ - pkg: &datatypes.Package{ - Name: "Invalid package 1", + sip: &datatypes.SIP{ + Name: "Invalid SIP 1", WorkflowID: "workflow-invalid", RunID: "Bad UUID", }, @@ -129,24 +129,24 @@ func TestCreatePackage(t *testing.T) { _, svc := setUpClient(t, logr.Discard()) ctx := context.Background() - pkg := *tt.args.pkg // Make a local copy of pkg. + sip := *tt.args.sip // Make a local copy of sip. 
- err := svc.CreatePackage(ctx, &pkg) + err := svc.CreateSIP(ctx, &sip) if tt.wantErr != "" { assert.Error(t, err, tt.wantErr) return } assert.NilError(t, err) - assert.DeepEqual(t, &pkg, tt.want, + assert.DeepEqual(t, &sip, tt.want, cmpopts.EquateApproxTime(time.Millisecond*100), - cmpopts.IgnoreUnexported(db.Pkg{}, db.PkgEdges{}), + cmpopts.IgnoreUnexported(db.SIP{}, db.SIPEdges{}), ) }) } } -func TestUpdatePackage(t *testing.T) { +func TestUpdateSIP(t *testing.T) { t.Parallel() runID := uuid.MustParse("c5f7c35a-d5a6-4e00-b4da-b036ce5b40bc") @@ -183,50 +183,50 @@ func TestUpdatePackage(t *testing.T) { completed2 := sql.NullTime{Time: started2.Time.Add(time.Second), Valid: true} type params struct { - pkg *datatypes.Package - updater persistence.PackageUpdater + sip *datatypes.SIP + updater persistence.SIPUpdater } tests := []struct { name string args params - want *datatypes.Package + want *datatypes.SIP wantErr string }{ { - name: "Updates all package columns", + name: "Updates all SIP columns", args: params{ - pkg: &datatypes.Package{ - Name: "Test package", + sip: &datatypes.SIP{ + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started, CompletedAt: completed, }, - updater: func(p *datatypes.Package) (*datatypes.Package, error) { + updater: func(p *datatypes.SIP) (*datatypes.SIP, error) { p.ID = 100 // No-op, can't update ID. - p.Name = "Updated package" + p.Name = "Updated SIP" p.WorkflowID = "workflow-2" p.RunID = runID2.String() p.AIPID = aipID2 p.LocationID = locID2 - p.Status = enums.PackageStatusDone + p.Status = enums.SIPStatusDone p.CreatedAt = started2.Time // No-op, can't update CreatedAt. p.StartedAt = started2 p.CompletedAt = completed2 return p, nil }, }, - want: &datatypes.Package{ + want: &datatypes.SIP{ ID: 1, - Name: "Updated package", + Name: "Updated SIP", WorkflowID: "workflow-2", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -235,51 +235,51 @@ func TestUpdatePackage(t *testing.T) { { name: "Only updates selected columns", args: params{ - pkg: &datatypes.Package{ - Name: "Test package", + sip: &datatypes.SIP{ + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started, }, - updater: func(p *datatypes.Package) (*datatypes.Package, error) { - p.Status = enums.PackageStatusDone + updater: func(p *datatypes.SIP) (*datatypes.SIP, error) { + p.Status = enums.SIPStatusDone p.CompletedAt = completed return p, nil }, }, - want: &datatypes.Package{ + want: &datatypes.SIP{ ID: 1, - Name: "Test package", + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Now(), StartedAt: started, CompletedAt: completed, }, }, { - name: "Errors when package to update is not found", + name: "Errors when SIP to update is not found", args: params{ - updater: func(p *datatypes.Package) (*datatypes.Package, error) { + updater: func(p *datatypes.SIP) (*datatypes.SIP, error) { return nil, fmt.Errorf("Bad input") }, }, - wantErr: "not found error: db: pkg not found", + wantErr: "not found error: db: sip not found", }, { name: "Errors when the updater errors", args: params{ - pkg: 
&datatypes.Package{ - Name: "Test package", + sip: &datatypes.SIP{ + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, }, - updater: func(p *datatypes.Package) (*datatypes.Package, error) { + updater: func(p *datatypes.SIP) (*datatypes.SIP, error) { return nil, fmt.Errorf("Bad input") }, }, @@ -288,13 +288,13 @@ func TestUpdatePackage(t *testing.T) { { name: "Errors when updater sets an invalid RunID", args: params{ - pkg: &datatypes.Package{ - Name: "Test package", + sip: &datatypes.SIP{ + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, }, - updater: func(p *datatypes.Package) (*datatypes.Package, error) { + updater: func(p *datatypes.SIP) (*datatypes.SIP, error) { p.RunID = "Bad UUID" return p, nil }, @@ -310,29 +310,29 @@ func TestUpdatePackage(t *testing.T) { ctx := context.Background() var id int - if tt.args.pkg != nil { - pkg := *tt.args.pkg // Make a local copy of pkg. - err := svc.CreatePackage(ctx, &pkg) + if tt.args.sip != nil { + sip := *tt.args.sip // Make a local copy of sip. + err := svc.CreateSIP(ctx, &sip) assert.NilError(t, err) - id = pkg.ID + id = sip.ID } - pp, err := svc.UpdatePackage(ctx, id, tt.args.updater) + sip, err := svc.UpdateSIP(ctx, id, tt.args.updater) if tt.wantErr != "" { assert.Error(t, err, tt.wantErr) return } - assert.DeepEqual(t, pp, tt.want, + assert.DeepEqual(t, sip, tt.want, cmpopts.EquateApproxTime(time.Millisecond*100), - cmpopts.IgnoreUnexported(db.Pkg{}, db.PkgEdges{}), + cmpopts.IgnoreUnexported(db.SIP{}, db.SIPEdges{}), ) }) } } -func TestListPackages(t *testing.T) { +func TestListSIPs(t *testing.T) { t.Parallel() runID := uuid.MustParse("c5f7c35a-d5a6-4e00-b4da-b036ce5b40bc") @@ -375,62 +375,62 @@ func TestListPackages(t *testing.T) { completed2 := sql.NullTime{Time: started2.Time.Add(time.Second), Valid: true} type results struct { - data []*datatypes.Package + data []*datatypes.SIP page *persistence.Page } tests := []struct { - name string - data []*datatypes.Package - packageFilter *persistence.PackageFilter - want results - wantErr string + name string + data []*datatypes.SIP + sipFilter *persistence.SIPFilter + want results + wantErr string }{ { - name: "Returns all packages", - data: []*datatypes.Package{ + name: "Returns all SIPs", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 1, - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Now(), StartedAt: started, CompletedAt: completed, }, { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -443,42 +443,42 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns first 
page of packages", - data: []*datatypes.Package{ + name: "Returns first page of SIPs", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ Page: persistence.Page{Limit: 1}, }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 1, - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Now(), StartedAt: started, CompletedAt: completed, @@ -491,42 +491,42 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns second page of packages", - data: []*datatypes.Package{ + name: "Returns second page of SIPs", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ Page: persistence.Page{Limit: 1, Offset: 1}, }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -540,15 +540,15 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns packages whose names contain a string", - data: []*datatypes.Package{ + name: "Returns SIPs whose names contain a string", + data: []*datatypes.SIP{ { - Name: "Test package", + Name: "Test SIP", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, @@ -558,16 +558,16 @@ func TestListPackages(t *testing.T) { RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ Name: ref.New("small"), }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 2, Name: "small.zip", @@ -575,7 +575,7 @@ func TestListPackages(t *testing.T) { RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -588,42 +588,42 @@ func TestListPackages(t *testing.T) { }, }, { 
- name: "Returns packages filtered by AIPID", - data: []*datatypes.Package{ + name: "Returns SIPs filtered by AIPID", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ AIPID: &aipID2.UUID, }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -636,42 +636,42 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns packages filtered by LocationID", - data: []*datatypes.Package{ + name: "Returns SIPs filtered by LocationID", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ LocationID: &locID2.UUID, }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -684,42 +684,42 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns packages filtered by status", - data: []*datatypes.Package{ + name: "Returns SIPs filtered by status", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ - Status: ref.New(enums.PackageStatusInProgress), + sipFilter: &persistence.SIPFilter{ + Status: ref.New(enums.SIPStatusInProgress), }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: 
started2, CompletedAt: completed2, @@ -732,30 +732,30 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns packages filtered by CreatedAt", - data: []*datatypes.Package{ + name: "Returns SIPs filtered by CreatedAt", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ CreatedAt: func(t *testing.T) *timerange.Range { r, err := timerange.New( time.Now().Add(-1*time.Minute), @@ -768,27 +768,27 @@ func TestListPackages(t *testing.T) { }(t), }, want: results{ - data: []*datatypes.Package{ + data: []*datatypes.SIP{ { ID: 1, - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, CreatedAt: time.Now(), StartedAt: started, CompletedAt: completed, }, { ID: 2, - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, CreatedAt: time.Now(), StartedAt: started2, CompletedAt: completed2, @@ -801,30 +801,30 @@ func TestListPackages(t *testing.T) { }, }, { - name: "Returns no results when no packages match CreatedAt range", - data: []*datatypes.Package{ + name: "Returns no results when no SIPs match CreatedAt range", + data: []*datatypes.SIP{ { - Name: "Test package 1", + Name: "Test SIP 1", WorkflowID: "workflow-1", RunID: runID.String(), AIPID: aipID, LocationID: locID, - Status: enums.PackageStatusDone, + Status: enums.SIPStatusDone, StartedAt: started, CompletedAt: completed, }, { - Name: "Test package 2", + Name: "Test SIP 2", WorkflowID: "workflow-1", RunID: runID2.String(), AIPID: aipID2, LocationID: locID2, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, StartedAt: started2, CompletedAt: completed2, }, }, - packageFilter: &persistence.PackageFilter{ + sipFilter: &persistence.SIPFilter{ CreatedAt: func(t *testing.T) *timerange.Range { r, err := timerange.New( time.Now().Add(time.Minute), @@ -837,7 +837,7 @@ func TestListPackages(t *testing.T) { }(t), }, want: results{ - data: []*datatypes.Package{}, + data: []*datatypes.SIP{}, page: &persistence.Page{ Limit: entclient.DefaultPageSize, Total: 0, @@ -854,18 +854,18 @@ func TestListPackages(t *testing.T) { ctx := context.Background() if len(tt.data) > 0 { - for _, pkg := range tt.data { - err := svc.CreatePackage(ctx, pkg) + for _, sip := range tt.data { + err := svc.CreateSIP(ctx, sip) assert.NilError(t, err) } } - got, pg, err := svc.ListPackages(ctx, tt.packageFilter) + got, pg, err := svc.ListSIPs(ctx, tt.sipFilter) assert.NilError(t, err) assert.DeepEqual(t, got, tt.want.data, cmpopts.EquateApproxTime(time.Millisecond*100), - cmpopts.IgnoreUnexported(db.Pkg{}, db.PkgEdges{}), + cmpopts.IgnoreUnexported(db.SIP{}, db.SIPEdges{}), ) assert.DeepEqual(t, pg, tt.want.page) }) diff --git a/internal/persistence/ent/db/client.go b/internal/persistence/ent/db/client.go 
index 2798a3d60..eb29cee85 100644 --- a/internal/persistence/ent/db/client.go +++ b/internal/persistence/ent/db/client.go @@ -15,9 +15,9 @@ import ( "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // Client is the client that holds all ent builders. @@ -25,12 +25,12 @@ type Client struct { config // Schema is the client for creating, migrating and dropping schema. Schema *migrate.Schema - // Pkg is the client for interacting with the Pkg builders. - Pkg *PkgClient // PreservationAction is the client for interacting with the PreservationAction builders. PreservationAction *PreservationActionClient // PreservationTask is the client for interacting with the PreservationTask builders. PreservationTask *PreservationTaskClient + // SIP is the client for interacting with the SIP builders. + SIP *SIPClient } // NewClient creates a new client configured with the given options. @@ -42,9 +42,9 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) - c.Pkg = NewPkgClient(c.config) c.PreservationAction = NewPreservationActionClient(c.config) c.PreservationTask = NewPreservationTaskClient(c.config) + c.SIP = NewSIPClient(c.config) } type ( @@ -137,9 +137,9 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { return &Tx{ ctx: ctx, config: cfg, - Pkg: NewPkgClient(cfg), PreservationAction: NewPreservationActionClient(cfg), PreservationTask: NewPreservationTaskClient(cfg), + SIP: NewSIPClient(cfg), }, nil } @@ -159,16 +159,16 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) return &Tx{ ctx: ctx, config: cfg, - Pkg: NewPkgClient(cfg), PreservationAction: NewPreservationActionClient(cfg), PreservationTask: NewPreservationTaskClient(cfg), + SIP: NewSIPClient(cfg), }, nil } // Debug returns a new debug-client. It's used to get verbose logging on specific operations. // // client.Debug(). -// Pkg. +// PreservationAction. // Query(). // Count(ctx) func (c *Client) Debug() *Client { @@ -190,182 +190,33 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { - c.Pkg.Use(hooks...) c.PreservationAction.Use(hooks...) c.PreservationTask.Use(hooks...) + c.SIP.Use(hooks...) } // Intercept adds the query interceptors to all the entity clients. // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { - c.Pkg.Intercept(interceptors...) c.PreservationAction.Intercept(interceptors...) c.PreservationTask.Intercept(interceptors...) + c.SIP.Intercept(interceptors...) } // Mutate implements the ent.Mutator interface. 
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { switch m := m.(type) { - case *PkgMutation: - return c.Pkg.mutate(ctx, m) case *PreservationActionMutation: return c.PreservationAction.mutate(ctx, m) case *PreservationTaskMutation: return c.PreservationTask.mutate(ctx, m) + case *SIPMutation: + return c.SIP.mutate(ctx, m) default: return nil, fmt.Errorf("db: unknown mutation type %T", m) } } -// PkgClient is a client for the Pkg schema. -type PkgClient struct { - config -} - -// NewPkgClient returns a client for the Pkg from the given config. -func NewPkgClient(c config) *PkgClient { - return &PkgClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `pkg.Hooks(f(g(h())))`. -func (c *PkgClient) Use(hooks ...Hook) { - c.hooks.Pkg = append(c.hooks.Pkg, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `pkg.Intercept(f(g(h())))`. -func (c *PkgClient) Intercept(interceptors ...Interceptor) { - c.inters.Pkg = append(c.inters.Pkg, interceptors...) -} - -// Create returns a builder for creating a Pkg entity. -func (c *PkgClient) Create() *PkgCreate { - mutation := newPkgMutation(c.config, OpCreate) - return &PkgCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of Pkg entities. -func (c *PkgClient) CreateBulk(builders ...*PkgCreate) *PkgCreateBulk { - return &PkgCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *PkgClient) MapCreateBulk(slice any, setFunc func(*PkgCreate, int)) *PkgCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &PkgCreateBulk{err: fmt.Errorf("calling to PkgClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*PkgCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &PkgCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for Pkg. -func (c *PkgClient) Update() *PkgUpdate { - mutation := newPkgMutation(c.config, OpUpdate) - return &PkgUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *PkgClient) UpdateOne(pk *Pkg) *PkgUpdateOne { - mutation := newPkgMutation(c.config, OpUpdateOne, withPkg(pk)) - return &PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. -func (c *PkgClient) UpdateOneID(id int) *PkgUpdateOne { - mutation := newPkgMutation(c.config, OpUpdateOne, withPkgID(id)) - return &PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for Pkg. -func (c *PkgClient) Delete() *PkgDelete { - mutation := newPkgMutation(c.config, OpDelete) - return &PkgDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *PkgClient) DeleteOne(pk *Pkg) *PkgDeleteOne { - return c.DeleteOneID(pk.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. 
-func (c *PkgClient) DeleteOneID(id int) *PkgDeleteOne { - builder := c.Delete().Where(pkg.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &PkgDeleteOne{builder} -} - -// Query returns a query builder for Pkg. -func (c *PkgClient) Query() *PkgQuery { - return &PkgQuery{ - config: c.config, - ctx: &QueryContext{Type: TypePkg}, - inters: c.Interceptors(), - } -} - -// Get returns a Pkg entity by its id. -func (c *PkgClient) Get(ctx context.Context, id int) (*Pkg, error) { - return c.Query().Where(pkg.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *PkgClient) GetX(ctx context.Context, id int) *Pkg { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// QueryPreservationActions queries the preservation_actions edge of a Pkg. -func (c *PkgClient) QueryPreservationActions(pk *Pkg) *PreservationActionQuery { - query := (&PreservationActionClient{config: c.config}).Query() - query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pk.ID - step := sqlgraph.NewStep( - sqlgraph.From(pkg.Table, pkg.FieldID, id), - sqlgraph.To(preservationaction.Table, preservationaction.FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, pkg.PreservationActionsTable, pkg.PreservationActionsColumn), - ) - fromV = sqlgraph.Neighbors(pk.driver.Dialect(), step) - return fromV, nil - } - return query -} - -// Hooks returns the client hooks. -func (c *PkgClient) Hooks() []Hook { - return c.hooks.Pkg -} - -// Interceptors returns the client interceptors. -func (c *PkgClient) Interceptors() []Interceptor { - return c.inters.Pkg -} - -func (c *PkgClient) mutate(ctx context.Context, m *PkgMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&PkgCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&PkgUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&PkgDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("db: unknown Pkg mutation op: %q", m.Op()) - } -} - // PreservationActionClient is a client for the PreservationAction schema. type PreservationActionClient struct { config @@ -474,15 +325,15 @@ func (c *PreservationActionClient) GetX(ctx context.Context, id int) *Preservati return obj } -// QueryPackage queries the package edge of a PreservationAction. -func (c *PreservationActionClient) QueryPackage(pa *PreservationAction) *PkgQuery { - query := (&PkgClient{config: c.config}).Query() +// QuerySip queries the sip edge of a PreservationAction. +func (c *PreservationActionClient) QuerySip(pa *PreservationAction) *SIPQuery { + query := (&SIPClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := pa.ID step := sqlgraph.NewStep( sqlgraph.From(preservationaction.Table, preservationaction.FieldID, id), - sqlgraph.To(pkg.Table, pkg.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, preservationaction.PackageTable, preservationaction.PackageColumn), + sqlgraph.To(sip.Table, sip.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, preservationaction.SipTable, preservationaction.SipColumn), ) fromV = sqlgraph.Neighbors(pa.driver.Dialect(), step) return fromV, nil @@ -680,12 +531,161 @@ func (c *PreservationTaskClient) mutate(ctx context.Context, m *PreservationTask } } +// SIPClient is a client for the SIP schema. 
+type SIPClient struct { + config +} + +// NewSIPClient returns a client for the SIP from the given config. +func NewSIPClient(c config) *SIPClient { + return &SIPClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `sip.Hooks(f(g(h())))`. +func (c *SIPClient) Use(hooks ...Hook) { + c.hooks.SIP = append(c.hooks.SIP, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `sip.Intercept(f(g(h())))`. +func (c *SIPClient) Intercept(interceptors ...Interceptor) { + c.inters.SIP = append(c.inters.SIP, interceptors...) +} + +// Create returns a builder for creating a SIP entity. +func (c *SIPClient) Create() *SIPCreate { + mutation := newSIPMutation(c.config, OpCreate) + return &SIPCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of SIP entities. +func (c *SIPClient) CreateBulk(builders ...*SIPCreate) *SIPCreateBulk { + return &SIPCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *SIPClient) MapCreateBulk(slice any, setFunc func(*SIPCreate, int)) *SIPCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &SIPCreateBulk{err: fmt.Errorf("calling to SIPClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*SIPCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &SIPCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for SIP. +func (c *SIPClient) Update() *SIPUpdate { + mutation := newSIPMutation(c.config, OpUpdate) + return &SIPUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *SIPClient) UpdateOne(s *SIP) *SIPUpdateOne { + mutation := newSIPMutation(c.config, OpUpdateOne, withSIP(s)) + return &SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *SIPClient) UpdateOneID(id int) *SIPUpdateOne { + mutation := newSIPMutation(c.config, OpUpdateOne, withSIPID(id)) + return &SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for SIP. +func (c *SIPClient) Delete() *SIPDelete { + mutation := newSIPMutation(c.config, OpDelete) + return &SIPDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *SIPClient) DeleteOne(s *SIP) *SIPDeleteOne { + return c.DeleteOneID(s.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *SIPClient) DeleteOneID(id int) *SIPDeleteOne { + builder := c.Delete().Where(sip.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &SIPDeleteOne{builder} +} + +// Query returns a query builder for SIP. +func (c *SIPClient) Query() *SIPQuery { + return &SIPQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeSIP}, + inters: c.Interceptors(), + } +} + +// Get returns a SIP entity by its id. 
+func (c *SIPClient) Get(ctx context.Context, id int) (*SIP, error) { + return c.Query().Where(sip.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *SIPClient) GetX(ctx context.Context, id int) *SIP { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPreservationActions queries the preservation_actions edge of a SIP. +func (c *SIPClient) QueryPreservationActions(s *SIP) *PreservationActionQuery { + query := (&PreservationActionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := s.ID + step := sqlgraph.NewStep( + sqlgraph.From(sip.Table, sip.FieldID, id), + sqlgraph.To(preservationaction.Table, preservationaction.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, sip.PreservationActionsTable, sip.PreservationActionsColumn), + ) + fromV = sqlgraph.Neighbors(s.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *SIPClient) Hooks() []Hook { + return c.hooks.SIP +} + +// Interceptors returns the client interceptors. +func (c *SIPClient) Interceptors() []Interceptor { + return c.inters.SIP +} + +func (c *SIPClient) mutate(ctx context.Context, m *SIPMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&SIPCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&SIPUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&SIPDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown SIP mutation op: %q", m.Op()) + } +} + // hooks and interceptors per client, for fast access. type ( hooks struct { - Pkg, PreservationAction, PreservationTask []ent.Hook + PreservationAction, PreservationTask, SIP []ent.Hook } inters struct { - Pkg, PreservationAction, PreservationTask []ent.Interceptor + PreservationAction, PreservationTask, SIP []ent.Interceptor } ) diff --git a/internal/persistence/ent/db/ent.go b/internal/persistence/ent/db/ent.go index 32fe5ddd6..77861cfe2 100644 --- a/internal/persistence/ent/db/ent.go +++ b/internal/persistence/ent/db/ent.go @@ -12,9 +12,9 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // ent aliases to avoid import conflicts in user's code. 
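
The generated SIP client replaces the old Pkg client one-for-one. As a quick illustration of the renamed API, the sketch below (not part of the diff) opens the ingest ent client, registers a mutation hook through the new hook.SIPFunc adapter added further down in internal/persistence/ent/db/hook/hook.go, and walks the preservation_actions edge via SIPClient.QueryPreservationActions. The DSN, the SIP ID, and the client.SIP accessor on *db.Client (defined outside this excerpt) are assumptions; the rest mirrors the generated code shown here.

// Hypothetical usage sketch; not part of the generated diff. It assumes the
// standard entc-generated db.Open constructor and the client.SIP accessor on
// *db.Client, plus a development DSN and SIP ID chosen for illustration.
package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"

	"github.com/artefactual-sdps/enduro/internal/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/persistence/ent/db/hook"
	"github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip"
)

func main() {
	ctx := context.Background()

	// Assumed development DSN; parseTime is required for the time fields.
	client, err := db.Open("mysql", "user:pass@tcp(localhost:3306)/enduro?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Register a mutation hook through the new SIPFunc adapter (see hook.go below).
	client.SIP.Use(func(next db.Mutator) db.Mutator {
		return hook.SIPFunc(func(ctx context.Context, m *db.SIPMutation) (db.Value, error) {
			log.Printf("SIP mutation: %s", m.Op())
			return next.Mutate(ctx, m)
		})
	})

	// Fetch a SIP by ID and walk the renamed preservation_actions edge,
	// mirroring SIPClient.Get and SIPClient.QueryPreservationActions above.
	s, err := client.SIP.Query().Where(sip.ID(1)).Only(ctx)
	if err != nil {
		log.Fatal(err)
	}
	actions, err := client.SIP.QueryPreservationActions(s).All(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("SIP %q has %d preservation action(s)\n", s.Name, len(actions))
}

Note that the hook is registered on client.SIP rather than on the whole client, since SIPFunc's Mutate implementation in hook.go rejects any mutation type other than *db.SIPMutation.
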
@@ -75,9 +75,9 @@ var ( func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ - pkg.Table: pkg.ValidColumn, preservationaction.Table: preservationaction.ValidColumn, preservationtask.Table: preservationtask.ValidColumn, + sip.Table: sip.ValidColumn, }) }) return columnCheck(table, column) diff --git a/internal/persistence/ent/db/hook/hook.go b/internal/persistence/ent/db/hook/hook.go index b06b4b21f..f01e8f67c 100644 --- a/internal/persistence/ent/db/hook/hook.go +++ b/internal/persistence/ent/db/hook/hook.go @@ -9,18 +9,6 @@ import ( "github.com/artefactual-sdps/enduro/internal/persistence/ent/db" ) -// The PkgFunc type is an adapter to allow the use of ordinary -// function as Pkg mutator. -type PkgFunc func(context.Context, *db.PkgMutation) (db.Value, error) - -// Mutate calls f(ctx, m). -func (f PkgFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { - if mv, ok := m.(*db.PkgMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *db.PkgMutation", m) -} - // The PreservationActionFunc type is an adapter to allow the use of ordinary // function as PreservationAction mutator. type PreservationActionFunc func(context.Context, *db.PreservationActionMutation) (db.Value, error) @@ -45,6 +33,18 @@ func (f PreservationTaskFunc) Mutate(ctx context.Context, m db.Mutation) (db.Val return nil, fmt.Errorf("unexpected mutation type %T. expect *db.PreservationTaskMutation", m) } +// The SIPFunc type is an adapter to allow the use of ordinary +// function as SIP mutator. +type SIPFunc func(context.Context, *db.SIPMutation) (db.Value, error) + +// Mutate calls f(ctx, m). +func (f SIPFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.SIPMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.SIPMutation", m) +} + // Condition is a hook condition function. type Condition func(context.Context, db.Mutation) bool diff --git a/internal/persistence/ent/db/migrate/schema.go b/internal/persistence/ent/db/migrate/schema.go index 77c1c081e..5e56e581e 100644 --- a/internal/persistence/ent/db/migrate/schema.go +++ b/internal/persistence/ent/db/migrate/schema.go @@ -9,60 +9,6 @@ import ( ) var ( - // PackageColumns holds the columns for the "package" table. - PackageColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "name", Type: field.TypeString, Size: 2048}, - {Name: "workflow_id", Type: field.TypeString, Size: 255}, - {Name: "run_id", Type: field.TypeUUID, Unique: true}, - {Name: "aip_id", Type: field.TypeUUID, Nullable: true}, - {Name: "location_id", Type: field.TypeUUID, Nullable: true}, - {Name: "status", Type: field.TypeInt8}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "started_at", Type: field.TypeTime, Nullable: true}, - {Name: "completed_at", Type: field.TypeTime, Nullable: true}, - } - // PackageTable holds the schema information for the "package" table. 
- PackageTable = &schema.Table{ - Name: "package", - Columns: PackageColumns, - PrimaryKey: []*schema.Column{PackageColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "package_name_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[1]}, - Annotation: &entsql.IndexAnnotation{ - Prefix: 50, - }, - }, - { - Name: "package_aip_id_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[4]}, - }, - { - Name: "package_location_id_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[5]}, - }, - { - Name: "package_status_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[6]}, - }, - { - Name: "package_created_at_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[7]}, - }, - { - Name: "package_started_at_idx", - Unique: false, - Columns: []*schema.Column{PackageColumns[8]}, - }, - }, - } // PreservationActionColumns holds the columns for the "preservation_action" table. PreservationActionColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -71,7 +17,7 @@ var ( {Name: "status", Type: field.TypeInt8}, {Name: "started_at", Type: field.TypeTime, Nullable: true}, {Name: "completed_at", Type: field.TypeTime, Nullable: true}, - {Name: "package_id", Type: field.TypeInt}, + {Name: "sip_id", Type: field.TypeInt}, } // PreservationActionTable holds the schema information for the "preservation_action" table. PreservationActionTable = &schema.Table{ @@ -80,9 +26,9 @@ var ( PrimaryKey: []*schema.Column{PreservationActionColumns[0]}, ForeignKeys: []*schema.ForeignKey{ { - Symbol: "preservation_action_package_preservation_actions", + Symbol: "preservation_action_sip_preservation_actions", Columns: []*schema.Column{PreservationActionColumns[6]}, - RefColumns: []*schema.Column{PackageColumns[0]}, + RefColumns: []*schema.Column{SipColumns[0]}, OnDelete: schema.Cascade, }, }, @@ -112,19 +58,70 @@ var ( }, }, } + // SipColumns holds the columns for the "sip" table. + SipColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Size: 2048}, + {Name: "workflow_id", Type: field.TypeString, Size: 255}, + {Name: "run_id", Type: field.TypeUUID}, + {Name: "aip_id", Type: field.TypeUUID, Nullable: true}, + {Name: "location_id", Type: field.TypeUUID, Nullable: true}, + {Name: "status", Type: field.TypeInt8}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "completed_at", Type: field.TypeTime, Nullable: true}, + } + // SipTable holds the schema information for the "sip" table. + SipTable = &schema.Table{ + Name: "sip", + Columns: SipColumns, + PrimaryKey: []*schema.Column{SipColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "sip_name_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[1]}, + Annotation: &entsql.IndexAnnotation{ + Prefix: 50, + }, + }, + { + Name: "sip_aip_id_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[4]}, + }, + { + Name: "sip_location_id_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[5]}, + }, + { + Name: "sip_status_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[6]}, + }, + { + Name: "sip_created_at_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[7]}, + }, + { + Name: "sip_started_at_idx", + Unique: false, + Columns: []*schema.Column{SipColumns[8]}, + }, + }, + } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ - PackageTable, PreservationActionTable, PreservationTaskTable, + SipTable, } ) func init() { - PackageTable.Annotation = &entsql.Annotation{ - Table: "package", - } - PreservationActionTable.ForeignKeys[0].RefTable = PackageTable + PreservationActionTable.ForeignKeys[0].RefTable = SipTable PreservationActionTable.Annotation = &entsql.Annotation{ Table: "preservation_action", } @@ -132,4 +129,7 @@ func init() { PreservationTaskTable.Annotation = &entsql.Annotation{ Table: "preservation_task", } + SipTable.Annotation = &entsql.Annotation{ + Table: "sip", + } } diff --git a/internal/persistence/ent/db/mutation.go b/internal/persistence/ent/db/mutation.go index 13c7cd1ec..396b7c97f 100644 --- a/internal/persistence/ent/db/mutation.go +++ b/internal/persistence/ent/db/mutation.go @@ -11,10 +11,10 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" "github.com/google/uuid" ) @@ -27,47 +27,46 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. - TypePkg = "Pkg" TypePreservationAction = "PreservationAction" TypePreservationTask = "PreservationTask" + TypeSIP = "SIP" ) -// PkgMutation represents an operation that mutates the Pkg nodes in the graph. -type PkgMutation struct { +// PreservationActionMutation represents an operation that mutates the PreservationAction nodes in the graph. +type PreservationActionMutation struct { config - op Op - typ string - id *int - name *string - workflow_id *string - run_id *uuid.UUID - aip_id *uuid.UUID - location_id *uuid.UUID - status *int8 - addstatus *int8 - created_at *time.Time - started_at *time.Time - completed_at *time.Time - clearedFields map[string]struct{} - preservation_actions map[int]struct{} - removedpreservation_actions map[int]struct{} - clearedpreservation_actions bool - done bool - oldValue func(context.Context) (*Pkg, error) - predicates []predicate.Pkg + op Op + typ string + id *int + workflow_id *string + _type *int8 + add_type *int8 + status *int8 + addstatus *int8 + started_at *time.Time + completed_at *time.Time + clearedFields map[string]struct{} + sip *int + clearedsip bool + tasks map[int]struct{} + removedtasks map[int]struct{} + clearedtasks bool + done bool + oldValue func(context.Context) (*PreservationAction, error) + predicates []predicate.PreservationAction } -var _ ent.Mutation = (*PkgMutation)(nil) +var _ ent.Mutation = (*PreservationActionMutation)(nil) -// pkgOption allows management of the mutation configuration using functional options. -type pkgOption func(*PkgMutation) +// preservationactionOption allows management of the mutation configuration using functional options. +type preservationactionOption func(*PreservationActionMutation) -// newPkgMutation creates new mutation for the Pkg entity. -func newPkgMutation(c config, op Op, opts ...pkgOption) *PkgMutation { - m := &PkgMutation{ +// newPreservationActionMutation creates new mutation for the PreservationAction entity. 
+func newPreservationActionMutation(c config, op Op, opts ...preservationactionOption) *PreservationActionMutation { + m := &PreservationActionMutation{ config: c, op: op, - typ: TypePkg, + typ: TypePreservationAction, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -76,20 +75,20 @@ func newPkgMutation(c config, op Op, opts ...pkgOption) *PkgMutation { return m } -// withPkgID sets the ID field of the mutation. -func withPkgID(id int) pkgOption { - return func(m *PkgMutation) { +// withPreservationActionID sets the ID field of the mutation. +func withPreservationActionID(id int) preservationactionOption { + return func(m *PreservationActionMutation) { var ( err error once sync.Once - value *Pkg + value *PreservationAction ) - m.oldValue = func(ctx context.Context) (*Pkg, error) { + m.oldValue = func(ctx context.Context) (*PreservationAction, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().Pkg.Get(ctx, id) + value, err = m.Client().PreservationAction.Get(ctx, id) } }) return value, err @@ -98,10 +97,10 @@ func withPkgID(id int) pkgOption { } } -// withPkg sets the old Pkg of the mutation. -func withPkg(node *Pkg) pkgOption { - return func(m *PkgMutation) { - m.oldValue = func(context.Context) (*Pkg, error) { +// withPreservationAction sets the old PreservationAction of the mutation. +func withPreservationAction(node *PreservationAction) preservationactionOption { + return func(m *PreservationActionMutation) { + m.oldValue = func(context.Context) (*PreservationAction, error) { return node, nil } m.id = &node.ID @@ -110,7 +109,7 @@ func withPkg(node *Pkg) pkgOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m PkgMutation) Client() *Client { +func (m PreservationActionMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -118,7 +117,7 @@ func (m PkgMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m PkgMutation) Tx() (*Tx, error) { +func (m PreservationActionMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -129,7 +128,7 @@ func (m PkgMutation) Tx() (*Tx, error) { // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *PkgMutation) ID() (id int, exists bool) { +func (m *PreservationActionMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -140,7 +139,7 @@ func (m *PkgMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. 
-func (m *PkgMutation) IDs(ctx context.Context) ([]int, error) { +func (m *PreservationActionMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -149,55 +148,19 @@ func (m *PkgMutation) IDs(ctx context.Context) ([]int, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().Pkg.Query().Where(m.predicates...).IDs(ctx) + return m.Client().PreservationAction.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetName sets the "name" field. -func (m *PkgMutation) SetName(s string) { - m.name = &s -} - -// Name returns the value of the "name" field in the mutation. -func (m *PkgMutation) Name() (r string, exists bool) { - v := m.name - if v == nil { - return - } - return *v, true -} - -// OldName returns the old "name" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldName(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) - } - return oldValue.Name, nil -} - -// ResetName resets all changes to the "name" field. -func (m *PkgMutation) ResetName() { - m.name = nil -} - // SetWorkflowID sets the "workflow_id" field. -func (m *PkgMutation) SetWorkflowID(s string) { +func (m *PreservationActionMutation) SetWorkflowID(s string) { m.workflow_id = &s } // WorkflowID returns the value of the "workflow_id" field in the mutation. -func (m *PkgMutation) WorkflowID() (r string, exists bool) { +func (m *PreservationActionMutation) WorkflowID() (r string, exists bool) { v := m.workflow_id if v == nil { return @@ -205,10 +168,10 @@ func (m *PkgMutation) WorkflowID() (r string, exists bool) { return *v, true } -// OldWorkflowID returns the old "workflow_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldWorkflowID returns the old "workflow_id" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldWorkflowID(ctx context.Context) (v string, err error) { +func (m *PreservationActionMutation) OldWorkflowID(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldWorkflowID is only allowed on UpdateOne operations") } @@ -223,152 +186,74 @@ func (m *PkgMutation) OldWorkflowID(ctx context.Context) (v string, err error) { } // ResetWorkflowID resets all changes to the "workflow_id" field. -func (m *PkgMutation) ResetWorkflowID() { +func (m *PreservationActionMutation) ResetWorkflowID() { m.workflow_id = nil } -// SetRunID sets the "run_id" field. -func (m *PkgMutation) SetRunID(u uuid.UUID) { - m.run_id = &u +// SetType sets the "type" field. 
+func (m *PreservationActionMutation) SetType(i int8) { + m._type = &i + m.add_type = nil } -// RunID returns the value of the "run_id" field in the mutation. -func (m *PkgMutation) RunID() (r uuid.UUID, exists bool) { - v := m.run_id +// GetType returns the value of the "type" field in the mutation. +func (m *PreservationActionMutation) GetType() (r int8, exists bool) { + v := m._type if v == nil { return } return *v, true } -// OldRunID returns the old "run_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldType returns the old "type" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldRunID(ctx context.Context) (v uuid.UUID, err error) { +func (m *PreservationActionMutation) OldType(ctx context.Context) (v int8, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRunID is only allowed on UpdateOne operations") + return v, errors.New("OldType is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRunID requires an ID field in the mutation") + return v, errors.New("OldType requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldRunID: %w", err) - } - return oldValue.RunID, nil -} - -// ResetRunID resets all changes to the "run_id" field. -func (m *PkgMutation) ResetRunID() { - m.run_id = nil -} - -// SetAipID sets the "aip_id" field. -func (m *PkgMutation) SetAipID(u uuid.UUID) { - m.aip_id = &u -} - -// AipID returns the value of the "aip_id" field in the mutation. -func (m *PkgMutation) AipID() (r uuid.UUID, exists bool) { - v := m.aip_id - if v == nil { - return + return v, fmt.Errorf("querying old value for OldType: %w", err) } - return *v, true + return oldValue.Type, nil } -// OldAipID returns the old "aip_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldAipID(ctx context.Context) (v uuid.UUID, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldAipID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldAipID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldAipID: %w", err) +// AddType adds i to the "type" field. +func (m *PreservationActionMutation) AddType(i int8) { + if m.add_type != nil { + *m.add_type += i + } else { + m.add_type = &i } - return oldValue.AipID, nil -} - -// ClearAipID clears the value of the "aip_id" field. -func (m *PkgMutation) ClearAipID() { - m.aip_id = nil - m.clearedFields[pkg.FieldAipID] = struct{}{} -} - -// AipIDCleared returns if the "aip_id" field was cleared in this mutation. -func (m *PkgMutation) AipIDCleared() bool { - _, ok := m.clearedFields[pkg.FieldAipID] - return ok -} - -// ResetAipID resets all changes to the "aip_id" field. -func (m *PkgMutation) ResetAipID() { - m.aip_id = nil - delete(m.clearedFields, pkg.FieldAipID) -} - -// SetLocationID sets the "location_id" field. 
-func (m *PkgMutation) SetLocationID(u uuid.UUID) { - m.location_id = &u } -// LocationID returns the value of the "location_id" field in the mutation. -func (m *PkgMutation) LocationID() (r uuid.UUID, exists bool) { - v := m.location_id +// AddedType returns the value that was added to the "type" field in this mutation. +func (m *PreservationActionMutation) AddedType() (r int8, exists bool) { + v := m.add_type if v == nil { return } return *v, true } -// OldLocationID returns the old "location_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldLocationID(ctx context.Context) (v uuid.UUID, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLocationID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLocationID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldLocationID: %w", err) - } - return oldValue.LocationID, nil -} - -// ClearLocationID clears the value of the "location_id" field. -func (m *PkgMutation) ClearLocationID() { - m.location_id = nil - m.clearedFields[pkg.FieldLocationID] = struct{}{} -} - -// LocationIDCleared returns if the "location_id" field was cleared in this mutation. -func (m *PkgMutation) LocationIDCleared() bool { - _, ok := m.clearedFields[pkg.FieldLocationID] - return ok -} - -// ResetLocationID resets all changes to the "location_id" field. -func (m *PkgMutation) ResetLocationID() { - m.location_id = nil - delete(m.clearedFields, pkg.FieldLocationID) +// ResetType resets all changes to the "type" field. +func (m *PreservationActionMutation) ResetType() { + m._type = nil + m.add_type = nil } // SetStatus sets the "status" field. -func (m *PkgMutation) SetStatus(i int8) { +func (m *PreservationActionMutation) SetStatus(i int8) { m.status = &i m.addstatus = nil } // Status returns the value of the "status" field in the mutation. -func (m *PkgMutation) Status() (r int8, exists bool) { +func (m *PreservationActionMutation) Status() (r int8, exists bool) { v := m.status if v == nil { return @@ -376,10 +261,10 @@ func (m *PkgMutation) Status() (r int8, exists bool) { return *v, true } -// OldStatus returns the old "status" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldStatus returns the old "status" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldStatus(ctx context.Context) (v int8, err error) { +func (m *PreservationActionMutation) OldStatus(ctx context.Context) (v int8, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStatus is only allowed on UpdateOne operations") } @@ -394,7 +279,7 @@ func (m *PkgMutation) OldStatus(ctx context.Context) (v int8, err error) { } // AddStatus adds i to the "status" field. 
-func (m *PkgMutation) AddStatus(i int8) { +func (m *PreservationActionMutation) AddStatus(i int8) { if m.addstatus != nil { *m.addstatus += i } else { @@ -403,7 +288,7 @@ func (m *PkgMutation) AddStatus(i int8) { } // AddedStatus returns the value that was added to the "status" field in this mutation. -func (m *PkgMutation) AddedStatus() (r int8, exists bool) { +func (m *PreservationActionMutation) AddedStatus() (r int8, exists bool) { v := m.addstatus if v == nil { return @@ -412,54 +297,18 @@ func (m *PkgMutation) AddedStatus() (r int8, exists bool) { } // ResetStatus resets all changes to the "status" field. -func (m *PkgMutation) ResetStatus() { +func (m *PreservationActionMutation) ResetStatus() { m.status = nil m.addstatus = nil } -// SetCreatedAt sets the "created_at" field. -func (m *PkgMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *PkgMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *PkgMutation) ResetCreatedAt() { - m.created_at = nil -} - // SetStartedAt sets the "started_at" field. -func (m *PkgMutation) SetStartedAt(t time.Time) { +func (m *PreservationActionMutation) SetStartedAt(t time.Time) { m.started_at = &t } // StartedAt returns the value of the "started_at" field in the mutation. -func (m *PkgMutation) StartedAt() (r time.Time, exists bool) { +func (m *PreservationActionMutation) StartedAt() (r time.Time, exists bool) { v := m.started_at if v == nil { return @@ -467,10 +316,10 @@ func (m *PkgMutation) StartedAt() (r time.Time, exists bool) { return *v, true } -// OldStartedAt returns the old "started_at" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldStartedAt returns the old "started_at" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { +func (m *PreservationActionMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") } @@ -485,30 +334,30 @@ func (m *PkgMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) } // ClearStartedAt clears the value of the "started_at" field. 
-func (m *PkgMutation) ClearStartedAt() { +func (m *PreservationActionMutation) ClearStartedAt() { m.started_at = nil - m.clearedFields[pkg.FieldStartedAt] = struct{}{} + m.clearedFields[preservationaction.FieldStartedAt] = struct{}{} } // StartedAtCleared returns if the "started_at" field was cleared in this mutation. -func (m *PkgMutation) StartedAtCleared() bool { - _, ok := m.clearedFields[pkg.FieldStartedAt] +func (m *PreservationActionMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[preservationaction.FieldStartedAt] return ok } // ResetStartedAt resets all changes to the "started_at" field. -func (m *PkgMutation) ResetStartedAt() { +func (m *PreservationActionMutation) ResetStartedAt() { m.started_at = nil - delete(m.clearedFields, pkg.FieldStartedAt) + delete(m.clearedFields, preservationaction.FieldStartedAt) } // SetCompletedAt sets the "completed_at" field. -func (m *PkgMutation) SetCompletedAt(t time.Time) { +func (m *PreservationActionMutation) SetCompletedAt(t time.Time) { m.completed_at = &t } // CompletedAt returns the value of the "completed_at" field in the mutation. -func (m *PkgMutation) CompletedAt() (r time.Time, exists bool) { +func (m *PreservationActionMutation) CompletedAt() (r time.Time, exists bool) { v := m.completed_at if v == nil { return @@ -516,10 +365,10 @@ func (m *PkgMutation) CompletedAt() (r time.Time, exists bool) { return *v, true } -// OldCompletedAt returns the old "completed_at" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldCompletedAt returns the old "completed_at" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { +func (m *PreservationActionMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCompletedAt is only allowed on UpdateOne operations") } @@ -534,86 +383,149 @@ func (m *PkgMutation) OldCompletedAt(ctx context.Context) (v time.Time, err erro } // ClearCompletedAt clears the value of the "completed_at" field. -func (m *PkgMutation) ClearCompletedAt() { +func (m *PreservationActionMutation) ClearCompletedAt() { m.completed_at = nil - m.clearedFields[pkg.FieldCompletedAt] = struct{}{} + m.clearedFields[preservationaction.FieldCompletedAt] = struct{}{} } // CompletedAtCleared returns if the "completed_at" field was cleared in this mutation. -func (m *PkgMutation) CompletedAtCleared() bool { - _, ok := m.clearedFields[pkg.FieldCompletedAt] +func (m *PreservationActionMutation) CompletedAtCleared() bool { + _, ok := m.clearedFields[preservationaction.FieldCompletedAt] return ok } // ResetCompletedAt resets all changes to the "completed_at" field. -func (m *PkgMutation) ResetCompletedAt() { +func (m *PreservationActionMutation) ResetCompletedAt() { m.completed_at = nil - delete(m.clearedFields, pkg.FieldCompletedAt) + delete(m.clearedFields, preservationaction.FieldCompletedAt) } -// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by ids. 
-func (m *PkgMutation) AddPreservationActionIDs(ids ...int) { - if m.preservation_actions == nil { - m.preservation_actions = make(map[int]struct{}) - } - for i := range ids { - m.preservation_actions[ids[i]] = struct{}{} +// SetSipID sets the "sip_id" field. +func (m *PreservationActionMutation) SetSipID(i int) { + m.sip = &i +} + +// SipID returns the value of the "sip_id" field in the mutation. +func (m *PreservationActionMutation) SipID() (r int, exists bool) { + v := m.sip + if v == nil { + return } + return *v, true } -// ClearPreservationActions clears the "preservation_actions" edge to the PreservationAction entity. -func (m *PkgMutation) ClearPreservationActions() { - m.clearedpreservation_actions = true +// OldSipID returns the old "sip_id" field's value of the PreservationAction entity. +// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PreservationActionMutation) OldSipID(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSipID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSipID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSipID: %w", err) + } + return oldValue.SipID, nil } -// PreservationActionsCleared reports if the "preservation_actions" edge to the PreservationAction entity was cleared. -func (m *PkgMutation) PreservationActionsCleared() bool { - return m.clearedpreservation_actions +// ResetSipID resets all changes to the "sip_id" field. +func (m *PreservationActionMutation) ResetSipID() { + m.sip = nil } -// RemovePreservationActionIDs removes the "preservation_actions" edge to the PreservationAction entity by IDs. -func (m *PkgMutation) RemovePreservationActionIDs(ids ...int) { - if m.removedpreservation_actions == nil { - m.removedpreservation_actions = make(map[int]struct{}) +// ClearSip clears the "sip" edge to the SIP entity. +func (m *PreservationActionMutation) ClearSip() { + m.clearedsip = true + m.clearedFields[preservationaction.FieldSipID] = struct{}{} +} + +// SipCleared reports if the "sip" edge to the SIP entity was cleared. +func (m *PreservationActionMutation) SipCleared() bool { + return m.clearedsip +} + +// SipIDs returns the "sip" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// SipID instead. It exists only for internal usage by the builders. +func (m *PreservationActionMutation) SipIDs() (ids []int) { + if id := m.sip; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetSip resets all changes to the "sip" edge. +func (m *PreservationActionMutation) ResetSip() { + m.sip = nil + m.clearedsip = false +} + +// AddTaskIDs adds the "tasks" edge to the PreservationTask entity by ids. +func (m *PreservationActionMutation) AddTaskIDs(ids ...int) { + if m.tasks == nil { + m.tasks = make(map[int]struct{}) } for i := range ids { - delete(m.preservation_actions, ids[i]) - m.removedpreservation_actions[ids[i]] = struct{}{} + m.tasks[ids[i]] = struct{}{} } } -// RemovedPreservationActions returns the removed IDs of the "preservation_actions" edge to the PreservationAction entity. 
-func (m *PkgMutation) RemovedPreservationActionsIDs() (ids []int) { - for id := range m.removedpreservation_actions { +// ClearTasks clears the "tasks" edge to the PreservationTask entity. +func (m *PreservationActionMutation) ClearTasks() { + m.clearedtasks = true +} + +// TasksCleared reports if the "tasks" edge to the PreservationTask entity was cleared. +func (m *PreservationActionMutation) TasksCleared() bool { + return m.clearedtasks +} + +// RemoveTaskIDs removes the "tasks" edge to the PreservationTask entity by IDs. +func (m *PreservationActionMutation) RemoveTaskIDs(ids ...int) { + if m.removedtasks == nil { + m.removedtasks = make(map[int]struct{}) + } + for i := range ids { + delete(m.tasks, ids[i]) + m.removedtasks[ids[i]] = struct{}{} + } +} + +// RemovedTasks returns the removed IDs of the "tasks" edge to the PreservationTask entity. +func (m *PreservationActionMutation) RemovedTasksIDs() (ids []int) { + for id := range m.removedtasks { ids = append(ids, id) } return } -// PreservationActionsIDs returns the "preservation_actions" edge IDs in the mutation. -func (m *PkgMutation) PreservationActionsIDs() (ids []int) { - for id := range m.preservation_actions { +// TasksIDs returns the "tasks" edge IDs in the mutation. +func (m *PreservationActionMutation) TasksIDs() (ids []int) { + for id := range m.tasks { ids = append(ids, id) } return } -// ResetPreservationActions resets all changes to the "preservation_actions" edge. -func (m *PkgMutation) ResetPreservationActions() { - m.preservation_actions = nil - m.clearedpreservation_actions = false - m.removedpreservation_actions = nil +// ResetTasks resets all changes to the "tasks" edge. +func (m *PreservationActionMutation) ResetTasks() { + m.tasks = nil + m.clearedtasks = false + m.removedtasks = nil } -// Where appends a list predicates to the PkgMutation builder. -func (m *PkgMutation) Where(ps ...predicate.Pkg) { +// Where appends a list predicates to the PreservationActionMutation builder. +func (m *PreservationActionMutation) Where(ps ...predicate.PreservationAction) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the PkgMutation builder. Using this method, +// WhereP appends storage-level predicates to the PreservationActionMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *PkgMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Pkg, len(ps)) +func (m *PreservationActionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PreservationAction, len(ps)) for i := range ps { p[i] = ps[i] } @@ -621,51 +533,42 @@ func (m *PkgMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *PkgMutation) Op() Op { +func (m *PreservationActionMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *PkgMutation) SetOp(op Op) { +func (m *PreservationActionMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Pkg). -func (m *PkgMutation) Type() string { +// Type returns the node type of this mutation (PreservationAction). +func (m *PreservationActionMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *PkgMutation) Fields() []string { - fields := make([]string, 0, 9) - if m.name != nil { - fields = append(fields, pkg.FieldName) - } +func (m *PreservationActionMutation) Fields() []string { + fields := make([]string, 0, 6) if m.workflow_id != nil { - fields = append(fields, pkg.FieldWorkflowID) - } - if m.run_id != nil { - fields = append(fields, pkg.FieldRunID) - } - if m.aip_id != nil { - fields = append(fields, pkg.FieldAipID) + fields = append(fields, preservationaction.FieldWorkflowID) } - if m.location_id != nil { - fields = append(fields, pkg.FieldLocationID) + if m._type != nil { + fields = append(fields, preservationaction.FieldType) } if m.status != nil { - fields = append(fields, pkg.FieldStatus) - } - if m.created_at != nil { - fields = append(fields, pkg.FieldCreatedAt) + fields = append(fields, preservationaction.FieldStatus) } if m.started_at != nil { - fields = append(fields, pkg.FieldStartedAt) + fields = append(fields, preservationaction.FieldStartedAt) } if m.completed_at != nil { - fields = append(fields, pkg.FieldCompletedAt) + fields = append(fields, preservationaction.FieldCompletedAt) + } + if m.sip != nil { + fields = append(fields, preservationaction.FieldSipID) } return fields } @@ -673,26 +576,20 @@ func (m *PkgMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *PkgMutation) Field(name string) (ent.Value, bool) { +func (m *PreservationActionMutation) Field(name string) (ent.Value, bool) { switch name { - case pkg.FieldName: - return m.Name() - case pkg.FieldWorkflowID: + case preservationaction.FieldWorkflowID: return m.WorkflowID() - case pkg.FieldRunID: - return m.RunID() - case pkg.FieldAipID: - return m.AipID() - case pkg.FieldLocationID: - return m.LocationID() - case pkg.FieldStatus: + case preservationaction.FieldType: + return m.GetType() + case preservationaction.FieldStatus: return m.Status() - case pkg.FieldCreatedAt: - return m.CreatedAt() - case pkg.FieldStartedAt: + case preservationaction.FieldStartedAt: return m.StartedAt() - case pkg.FieldCompletedAt: + case preservationaction.FieldCompletedAt: return m.CompletedAt() + case preservationaction.FieldSipID: + return m.SipID() } return nil, false } @@ -700,108 +597,84 @@ func (m *PkgMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *PkgMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *PreservationActionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case pkg.FieldName: - return m.OldName(ctx) - case pkg.FieldWorkflowID: + case preservationaction.FieldWorkflowID: return m.OldWorkflowID(ctx) - case pkg.FieldRunID: - return m.OldRunID(ctx) - case pkg.FieldAipID: - return m.OldAipID(ctx) - case pkg.FieldLocationID: - return m.OldLocationID(ctx) - case pkg.FieldStatus: + case preservationaction.FieldType: + return m.OldType(ctx) + case preservationaction.FieldStatus: return m.OldStatus(ctx) - case pkg.FieldCreatedAt: - return m.OldCreatedAt(ctx) - case pkg.FieldStartedAt: + case preservationaction.FieldStartedAt: return m.OldStartedAt(ctx) - case pkg.FieldCompletedAt: + case preservationaction.FieldCompletedAt: return m.OldCompletedAt(ctx) + case preservationaction.FieldSipID: + return m.OldSipID(ctx) } - return nil, fmt.Errorf("unknown Pkg field %s", name) + return nil, fmt.Errorf("unknown PreservationAction field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PkgMutation) SetField(name string, value ent.Value) error { +func (m *PreservationActionMutation) SetField(name string, value ent.Value) error { switch name { - case pkg.FieldName: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetName(v) - return nil - case pkg.FieldWorkflowID: + case preservationaction.FieldWorkflowID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetWorkflowID(v) return nil - case pkg.FieldRunID: - v, ok := value.(uuid.UUID) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRunID(v) - return nil - case pkg.FieldAipID: - v, ok := value.(uuid.UUID) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetAipID(v) - return nil - case pkg.FieldLocationID: - v, ok := value.(uuid.UUID) + case preservationaction.FieldType: + v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetLocationID(v) + m.SetType(v) return nil - case pkg.FieldStatus: + case preservationaction.FieldStatus: v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetStatus(v) return nil - case pkg.FieldCreatedAt: + case preservationaction.FieldStartedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCreatedAt(v) + m.SetStartedAt(v) return nil - case pkg.FieldStartedAt: + case preservationaction.FieldCompletedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetStartedAt(v) + m.SetCompletedAt(v) return nil - case pkg.FieldCompletedAt: - v, ok := value.(time.Time) + case preservationaction.FieldSipID: + v, ok := value.(int) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCompletedAt(v) + m.SetSipID(v) return nil } - return fmt.Errorf("unknown Pkg field %s", name) + return fmt.Errorf("unknown PreservationAction field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. 
-func (m *PkgMutation) AddedFields() []string { +func (m *PreservationActionMutation) AddedFields() []string { var fields []string + if m.add_type != nil { + fields = append(fields, preservationaction.FieldType) + } if m.addstatus != nil { - fields = append(fields, pkg.FieldStatus) + fields = append(fields, preservationaction.FieldStatus) } return fields } @@ -809,9 +682,11 @@ func (m *PkgMutation) AddedFields() []string { // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *PkgMutation) AddedField(name string) (ent.Value, bool) { +func (m *PreservationActionMutation) AddedField(name string) (ent.Value, bool) { switch name { - case pkg.FieldStatus: + case preservationaction.FieldType: + return m.AddedType() + case preservationaction.FieldStatus: return m.AddedStatus() } return nil, false @@ -820,9 +695,16 @@ func (m *PkgMutation) AddedField(name string) (ent.Value, bool) { // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PkgMutation) AddField(name string, value ent.Value) error { +func (m *PreservationActionMutation) AddField(name string, value ent.Value) error { switch name { - case pkg.FieldStatus: + case preservationaction.FieldType: + v, ok := value.(int8) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddType(v) + return nil + case preservationaction.FieldStatus: v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) @@ -830,106 +712,92 @@ func (m *PkgMutation) AddField(name string, value ent.Value) error { m.AddStatus(v) return nil } - return fmt.Errorf("unknown Pkg numeric field %s", name) + return fmt.Errorf("unknown PreservationAction numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *PkgMutation) ClearedFields() []string { +func (m *PreservationActionMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(pkg.FieldAipID) { - fields = append(fields, pkg.FieldAipID) - } - if m.FieldCleared(pkg.FieldLocationID) { - fields = append(fields, pkg.FieldLocationID) - } - if m.FieldCleared(pkg.FieldStartedAt) { - fields = append(fields, pkg.FieldStartedAt) + if m.FieldCleared(preservationaction.FieldStartedAt) { + fields = append(fields, preservationaction.FieldStartedAt) } - if m.FieldCleared(pkg.FieldCompletedAt) { - fields = append(fields, pkg.FieldCompletedAt) + if m.FieldCleared(preservationaction.FieldCompletedAt) { + fields = append(fields, preservationaction.FieldCompletedAt) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *PkgMutation) FieldCleared(name string) bool { +func (m *PreservationActionMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *PkgMutation) ClearField(name string) error { +func (m *PreservationActionMutation) ClearField(name string) error { switch name { - case pkg.FieldAipID: - m.ClearAipID() - return nil - case pkg.FieldLocationID: - m.ClearLocationID() - return nil - case pkg.FieldStartedAt: + case preservationaction.FieldStartedAt: m.ClearStartedAt() return nil - case pkg.FieldCompletedAt: + case preservationaction.FieldCompletedAt: m.ClearCompletedAt() return nil } - return fmt.Errorf("unknown Pkg nullable field %s", name) + return fmt.Errorf("unknown PreservationAction nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *PkgMutation) ResetField(name string) error { +func (m *PreservationActionMutation) ResetField(name string) error { switch name { - case pkg.FieldName: - m.ResetName() - return nil - case pkg.FieldWorkflowID: + case preservationaction.FieldWorkflowID: m.ResetWorkflowID() return nil - case pkg.FieldRunID: - m.ResetRunID() - return nil - case pkg.FieldAipID: - m.ResetAipID() - return nil - case pkg.FieldLocationID: - m.ResetLocationID() + case preservationaction.FieldType: + m.ResetType() return nil - case pkg.FieldStatus: + case preservationaction.FieldStatus: m.ResetStatus() return nil - case pkg.FieldCreatedAt: - m.ResetCreatedAt() - return nil - case pkg.FieldStartedAt: + case preservationaction.FieldStartedAt: m.ResetStartedAt() return nil - case pkg.FieldCompletedAt: + case preservationaction.FieldCompletedAt: m.ResetCompletedAt() return nil + case preservationaction.FieldSipID: + m.ResetSipID() + return nil } - return fmt.Errorf("unknown Pkg field %s", name) + return fmt.Errorf("unknown PreservationAction field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *PkgMutation) AddedEdges() []string { - edges := make([]string, 0, 1) - if m.preservation_actions != nil { - edges = append(edges, pkg.EdgePreservationActions) +func (m *PreservationActionMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.sip != nil { + edges = append(edges, preservationaction.EdgeSip) + } + if m.tasks != nil { + edges = append(edges, preservationaction.EdgeTasks) } return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *PkgMutation) AddedIDs(name string) []ent.Value { +func (m *PreservationActionMutation) AddedIDs(name string) []ent.Value { switch name { - case pkg.EdgePreservationActions: - ids := make([]ent.Value, 0, len(m.preservation_actions)) - for id := range m.preservation_actions { + case preservationaction.EdgeSip: + if id := m.sip; id != nil { + return []ent.Value{*id} + } + case preservationaction.EdgeTasks: + ids := make([]ent.Value, 0, len(m.tasks)) + for id := range m.tasks { ids = append(ids, id) } return ids @@ -938,21 +806,21 @@ func (m *PkgMutation) AddedIDs(name string) []ent.Value { } // RemovedEdges returns all edge names that were removed in this mutation. 
-func (m *PkgMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) - if m.removedpreservation_actions != nil { - edges = append(edges, pkg.EdgePreservationActions) +func (m *PreservationActionMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + if m.removedtasks != nil { + edges = append(edges, preservationaction.EdgeTasks) } return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *PkgMutation) RemovedIDs(name string) []ent.Value { +func (m *PreservationActionMutation) RemovedIDs(name string) []ent.Value { switch name { - case pkg.EdgePreservationActions: - ids := make([]ent.Value, 0, len(m.removedpreservation_actions)) - for id := range m.removedpreservation_actions { + case preservationaction.EdgeTasks: + ids := make([]ent.Value, 0, len(m.removedtasks)) + for id := range m.removedtasks { ids = append(ids, id) } return ids @@ -961,78 +829,86 @@ func (m *PkgMutation) RemovedIDs(name string) []ent.Value { } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *PkgMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) - if m.clearedpreservation_actions { - edges = append(edges, pkg.EdgePreservationActions) +func (m *PreservationActionMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedsip { + edges = append(edges, preservationaction.EdgeSip) } - return edges -} + if m.clearedtasks { + edges = append(edges, preservationaction.EdgeTasks) + } + return edges +} // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *PkgMutation) EdgeCleared(name string) bool { +func (m *PreservationActionMutation) EdgeCleared(name string) bool { switch name { - case pkg.EdgePreservationActions: - return m.clearedpreservation_actions + case preservationaction.EdgeSip: + return m.clearedsip + case preservationaction.EdgeTasks: + return m.clearedtasks } return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *PkgMutation) ClearEdge(name string) error { +func (m *PreservationActionMutation) ClearEdge(name string) error { switch name { + case preservationaction.EdgeSip: + m.ClearSip() + return nil } - return fmt.Errorf("unknown Pkg unique edge %s", name) + return fmt.Errorf("unknown PreservationAction unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *PkgMutation) ResetEdge(name string) error { +func (m *PreservationActionMutation) ResetEdge(name string) error { switch name { - case pkg.EdgePreservationActions: - m.ResetPreservationActions() + case preservationaction.EdgeSip: + m.ResetSip() + return nil + case preservationaction.EdgeTasks: + m.ResetTasks() return nil } - return fmt.Errorf("unknown Pkg edge %s", name) + return fmt.Errorf("unknown PreservationAction edge %s", name) } -// PreservationActionMutation represents an operation that mutates the PreservationAction nodes in the graph. -type PreservationActionMutation struct { +// PreservationTaskMutation represents an operation that mutates the PreservationTask nodes in the graph. 
+type PreservationTaskMutation struct { config - op Op - typ string - id *int - workflow_id *string - _type *int8 - add_type *int8 - status *int8 - addstatus *int8 - started_at *time.Time - completed_at *time.Time - clearedFields map[string]struct{} - _package *int - cleared_package bool - tasks map[int]struct{} - removedtasks map[int]struct{} - clearedtasks bool - done bool - oldValue func(context.Context) (*PreservationAction, error) - predicates []predicate.PreservationAction + op Op + typ string + id *int + task_id *uuid.UUID + name *string + status *int8 + addstatus *int8 + started_at *time.Time + completed_at *time.Time + note *string + clearedFields map[string]struct{} + action *int + clearedaction bool + done bool + oldValue func(context.Context) (*PreservationTask, error) + predicates []predicate.PreservationTask } -var _ ent.Mutation = (*PreservationActionMutation)(nil) +var _ ent.Mutation = (*PreservationTaskMutation)(nil) -// preservationactionOption allows management of the mutation configuration using functional options. -type preservationactionOption func(*PreservationActionMutation) +// preservationtaskOption allows management of the mutation configuration using functional options. +type preservationtaskOption func(*PreservationTaskMutation) -// newPreservationActionMutation creates new mutation for the PreservationAction entity. -func newPreservationActionMutation(c config, op Op, opts ...preservationactionOption) *PreservationActionMutation { - m := &PreservationActionMutation{ +// newPreservationTaskMutation creates new mutation for the PreservationTask entity. +func newPreservationTaskMutation(c config, op Op, opts ...preservationtaskOption) *PreservationTaskMutation { + m := &PreservationTaskMutation{ config: c, op: op, - typ: TypePreservationAction, + typ: TypePreservationTask, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -1041,20 +917,20 @@ func newPreservationActionMutation(c config, op Op, opts ...preservationactionOp return m } -// withPreservationActionID sets the ID field of the mutation. -func withPreservationActionID(id int) preservationactionOption { - return func(m *PreservationActionMutation) { +// withPreservationTaskID sets the ID field of the mutation. +func withPreservationTaskID(id int) preservationtaskOption { + return func(m *PreservationTaskMutation) { var ( err error once sync.Once - value *PreservationAction + value *PreservationTask ) - m.oldValue = func(ctx context.Context) (*PreservationAction, error) { + m.oldValue = func(ctx context.Context) (*PreservationTask, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().PreservationAction.Get(ctx, id) + value, err = m.Client().PreservationTask.Get(ctx, id) } }) return value, err @@ -1063,10 +939,10 @@ func withPreservationActionID(id int) preservationactionOption { } } -// withPreservationAction sets the old PreservationAction of the mutation. -func withPreservationAction(node *PreservationAction) preservationactionOption { - return func(m *PreservationActionMutation) { - m.oldValue = func(context.Context) (*PreservationAction, error) { +// withPreservationTask sets the old PreservationTask of the mutation. 
+func withPreservationTask(node *PreservationTask) preservationtaskOption { + return func(m *PreservationTaskMutation) { + m.oldValue = func(context.Context) (*PreservationTask, error) { return node, nil } m.id = &node.ID @@ -1075,7 +951,7 @@ func withPreservationAction(node *PreservationAction) preservationactionOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m PreservationActionMutation) Client() *Client { +func (m PreservationTaskMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -1083,7 +959,7 @@ func (m PreservationActionMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m PreservationActionMutation) Tx() (*Tx, error) { +func (m PreservationTaskMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -1094,7 +970,7 @@ func (m PreservationActionMutation) Tx() (*Tx, error) { // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *PreservationActionMutation) ID() (id int, exists bool) { +func (m *PreservationTaskMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -1105,7 +981,7 @@ func (m *PreservationActionMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *PreservationActionMutation) IDs(ctx context.Context) ([]int, error) { +func (m *PreservationTaskMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -1114,112 +990,92 @@ func (m *PreservationActionMutation) IDs(ctx context.Context) ([]int, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().PreservationAction.Query().Where(m.predicates...).IDs(ctx) + return m.Client().PreservationTask.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } -// SetWorkflowID sets the "workflow_id" field. -func (m *PreservationActionMutation) SetWorkflowID(s string) { - m.workflow_id = &s +// SetTaskID sets the "task_id" field. +func (m *PreservationTaskMutation) SetTaskID(u uuid.UUID) { + m.task_id = &u } -// WorkflowID returns the value of the "workflow_id" field in the mutation. -func (m *PreservationActionMutation) WorkflowID() (r string, exists bool) { - v := m.workflow_id +// TaskID returns the value of the "task_id" field in the mutation. +func (m *PreservationTaskMutation) TaskID() (r uuid.UUID, exists bool) { + v := m.task_id if v == nil { return } return *v, true } -// OldWorkflowID returns the old "workflow_id" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldTaskID returns the old "task_id" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PreservationActionMutation) OldWorkflowID(ctx context.Context) (v string, err error) { +func (m *PreservationTaskMutation) OldTaskID(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldWorkflowID is only allowed on UpdateOne operations") + return v, errors.New("OldTaskID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldWorkflowID requires an ID field in the mutation") + return v, errors.New("OldTaskID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldWorkflowID: %w", err) + return v, fmt.Errorf("querying old value for OldTaskID: %w", err) } - return oldValue.WorkflowID, nil + return oldValue.TaskID, nil } -// ResetWorkflowID resets all changes to the "workflow_id" field. -func (m *PreservationActionMutation) ResetWorkflowID() { - m.workflow_id = nil +// ResetTaskID resets all changes to the "task_id" field. +func (m *PreservationTaskMutation) ResetTaskID() { + m.task_id = nil } -// SetType sets the "type" field. -func (m *PreservationActionMutation) SetType(i int8) { - m._type = &i - m.add_type = nil +// SetName sets the "name" field. +func (m *PreservationTaskMutation) SetName(s string) { + m.name = &s } -// GetType returns the value of the "type" field in the mutation. -func (m *PreservationActionMutation) GetType() (r int8, exists bool) { - v := m._type +// Name returns the value of the "name" field in the mutation. +func (m *PreservationTaskMutation) Name() (r string, exists bool) { + v := m.name if v == nil { return } return *v, true } -// OldType returns the old "type" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldName returns the old "name" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationActionMutation) OldType(ctx context.Context) (v int8, err error) { +func (m *PreservationTaskMutation) OldName(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldType is only allowed on UpdateOne operations") + return v, errors.New("OldName is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldType requires an ID field in the mutation") + return v, errors.New("OldName requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldType: %w", err) - } - return oldValue.Type, nil -} - -// AddType adds i to the "type" field. -func (m *PreservationActionMutation) AddType(i int8) { - if m.add_type != nil { - *m.add_type += i - } else { - m.add_type = &i - } -} - -// AddedType returns the value that was added to the "type" field in this mutation. -func (m *PreservationActionMutation) AddedType() (r int8, exists bool) { - v := m.add_type - if v == nil { - return + return v, fmt.Errorf("querying old value for OldName: %w", err) } - return *v, true + return oldValue.Name, nil } -// ResetType resets all changes to the "type" field. -func (m *PreservationActionMutation) ResetType() { - m._type = nil - m.add_type = nil +// ResetName resets all changes to the "name" field. 
+func (m *PreservationTaskMutation) ResetName() { + m.name = nil } // SetStatus sets the "status" field. -func (m *PreservationActionMutation) SetStatus(i int8) { +func (m *PreservationTaskMutation) SetStatus(i int8) { m.status = &i m.addstatus = nil } // Status returns the value of the "status" field in the mutation. -func (m *PreservationActionMutation) Status() (r int8, exists bool) { +func (m *PreservationTaskMutation) Status() (r int8, exists bool) { v := m.status if v == nil { return @@ -1227,10 +1083,10 @@ func (m *PreservationActionMutation) Status() (r int8, exists bool) { return *v, true } -// OldStatus returns the old "status" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldStatus returns the old "status" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationActionMutation) OldStatus(ctx context.Context) (v int8, err error) { +func (m *PreservationTaskMutation) OldStatus(ctx context.Context) (v int8, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStatus is only allowed on UpdateOne operations") } @@ -1245,7 +1101,7 @@ func (m *PreservationActionMutation) OldStatus(ctx context.Context) (v int8, err } // AddStatus adds i to the "status" field. -func (m *PreservationActionMutation) AddStatus(i int8) { +func (m *PreservationTaskMutation) AddStatus(i int8) { if m.addstatus != nil { *m.addstatus += i } else { @@ -1254,7 +1110,7 @@ func (m *PreservationActionMutation) AddStatus(i int8) { } // AddedStatus returns the value that was added to the "status" field in this mutation. -func (m *PreservationActionMutation) AddedStatus() (r int8, exists bool) { +func (m *PreservationTaskMutation) AddedStatus() (r int8, exists bool) { v := m.addstatus if v == nil { return @@ -1263,18 +1119,18 @@ func (m *PreservationActionMutation) AddedStatus() (r int8, exists bool) { } // ResetStatus resets all changes to the "status" field. -func (m *PreservationActionMutation) ResetStatus() { +func (m *PreservationTaskMutation) ResetStatus() { m.status = nil m.addstatus = nil } // SetStartedAt sets the "started_at" field. -func (m *PreservationActionMutation) SetStartedAt(t time.Time) { +func (m *PreservationTaskMutation) SetStartedAt(t time.Time) { m.started_at = &t } // StartedAt returns the value of the "started_at" field in the mutation. -func (m *PreservationActionMutation) StartedAt() (r time.Time, exists bool) { +func (m *PreservationTaskMutation) StartedAt() (r time.Time, exists bool) { v := m.started_at if v == nil { return @@ -1282,10 +1138,10 @@ func (m *PreservationActionMutation) StartedAt() (r time.Time, exists bool) { return *v, true } -// OldStartedAt returns the old "started_at" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldStartedAt returns the old "started_at" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PreservationActionMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { +func (m *PreservationTaskMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") } @@ -1300,30 +1156,30 @@ func (m *PreservationActionMutation) OldStartedAt(ctx context.Context) (v time.T } // ClearStartedAt clears the value of the "started_at" field. -func (m *PreservationActionMutation) ClearStartedAt() { +func (m *PreservationTaskMutation) ClearStartedAt() { m.started_at = nil - m.clearedFields[preservationaction.FieldStartedAt] = struct{}{} + m.clearedFields[preservationtask.FieldStartedAt] = struct{}{} } // StartedAtCleared returns if the "started_at" field was cleared in this mutation. -func (m *PreservationActionMutation) StartedAtCleared() bool { - _, ok := m.clearedFields[preservationaction.FieldStartedAt] +func (m *PreservationTaskMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[preservationtask.FieldStartedAt] return ok } // ResetStartedAt resets all changes to the "started_at" field. -func (m *PreservationActionMutation) ResetStartedAt() { +func (m *PreservationTaskMutation) ResetStartedAt() { m.started_at = nil - delete(m.clearedFields, preservationaction.FieldStartedAt) + delete(m.clearedFields, preservationtask.FieldStartedAt) } // SetCompletedAt sets the "completed_at" field. -func (m *PreservationActionMutation) SetCompletedAt(t time.Time) { +func (m *PreservationTaskMutation) SetCompletedAt(t time.Time) { m.completed_at = &t } // CompletedAt returns the value of the "completed_at" field in the mutation. -func (m *PreservationActionMutation) CompletedAt() (r time.Time, exists bool) { +func (m *PreservationTaskMutation) CompletedAt() (r time.Time, exists bool) { v := m.completed_at if v == nil { return @@ -1331,10 +1187,10 @@ func (m *PreservationActionMutation) CompletedAt() (r time.Time, exists bool) { return *v, true } -// OldCompletedAt returns the old "completed_at" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldCompletedAt returns the old "completed_at" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationActionMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { +func (m *PreservationTaskMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCompletedAt is only allowed on UpdateOne operations") } @@ -1349,149 +1205,144 @@ func (m *PreservationActionMutation) OldCompletedAt(ctx context.Context) (v time } // ClearCompletedAt clears the value of the "completed_at" field. -func (m *PreservationActionMutation) ClearCompletedAt() { +func (m *PreservationTaskMutation) ClearCompletedAt() { m.completed_at = nil - m.clearedFields[preservationaction.FieldCompletedAt] = struct{}{} + m.clearedFields[preservationtask.FieldCompletedAt] = struct{}{} } // CompletedAtCleared returns if the "completed_at" field was cleared in this mutation. 
-func (m *PreservationActionMutation) CompletedAtCleared() bool { - _, ok := m.clearedFields[preservationaction.FieldCompletedAt] +func (m *PreservationTaskMutation) CompletedAtCleared() bool { + _, ok := m.clearedFields[preservationtask.FieldCompletedAt] return ok } // ResetCompletedAt resets all changes to the "completed_at" field. -func (m *PreservationActionMutation) ResetCompletedAt() { +func (m *PreservationTaskMutation) ResetCompletedAt() { m.completed_at = nil - delete(m.clearedFields, preservationaction.FieldCompletedAt) + delete(m.clearedFields, preservationtask.FieldCompletedAt) } -// SetPackageID sets the "package_id" field. -func (m *PreservationActionMutation) SetPackageID(i int) { - m._package = &i +// SetNote sets the "note" field. +func (m *PreservationTaskMutation) SetNote(s string) { + m.note = &s } -// PackageID returns the value of the "package_id" field in the mutation. -func (m *PreservationActionMutation) PackageID() (r int, exists bool) { - v := m._package +// Note returns the value of the "note" field in the mutation. +func (m *PreservationTaskMutation) Note() (r string, exists bool) { + v := m.note if v == nil { return } return *v, true } -// OldPackageID returns the old "package_id" field's value of the PreservationAction entity. -// If the PreservationAction object wasn't provided to the builder, the object is fetched from the database. +// OldNote returns the old "note" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationActionMutation) OldPackageID(ctx context.Context) (v int, err error) { +func (m *PreservationTaskMutation) OldNote(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPackageID is only allowed on UpdateOne operations") + return v, errors.New("OldNote is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPackageID requires an ID field in the mutation") + return v, errors.New("OldNote requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldPackageID: %w", err) + return v, fmt.Errorf("querying old value for OldNote: %w", err) } - return oldValue.PackageID, nil + return oldValue.Note, nil } -// ResetPackageID resets all changes to the "package_id" field. -func (m *PreservationActionMutation) ResetPackageID() { - m._package = nil +// ResetNote resets all changes to the "note" field. +func (m *PreservationTaskMutation) ResetNote() { + m.note = nil } -// ClearPackage clears the "package" edge to the Pkg entity. -func (m *PreservationActionMutation) ClearPackage() { - m.cleared_package = true - m.clearedFields[preservationaction.FieldPackageID] = struct{}{} +// SetPreservationActionID sets the "preservation_action_id" field. +func (m *PreservationTaskMutation) SetPreservationActionID(i int) { + m.action = &i } -// PackageCleared reports if the "package" edge to the Pkg entity was cleared. -func (m *PreservationActionMutation) PackageCleared() bool { - return m.cleared_package +// PreservationActionID returns the value of the "preservation_action_id" field in the mutation. 
+func (m *PreservationTaskMutation) PreservationActionID() (r int, exists bool) { + v := m.action + if v == nil { + return + } + return *v, true } -// PackageIDs returns the "package" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// PackageID instead. It exists only for internal usage by the builders. -func (m *PreservationActionMutation) PackageIDs() (ids []int) { - if id := m._package; id != nil { - ids = append(ids, *id) - } - return -} - -// ResetPackage resets all changes to the "package" edge. -func (m *PreservationActionMutation) ResetPackage() { - m._package = nil - m.cleared_package = false -} - -// AddTaskIDs adds the "tasks" edge to the PreservationTask entity by ids. -func (m *PreservationActionMutation) AddTaskIDs(ids ...int) { - if m.tasks == nil { - m.tasks = make(map[int]struct{}) +// OldPreservationActionID returns the old "preservation_action_id" field's value of the PreservationTask entity. +// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PreservationTaskMutation) OldPreservationActionID(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPreservationActionID is only allowed on UpdateOne operations") } - for i := range ids { - m.tasks[ids[i]] = struct{}{} + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPreservationActionID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPreservationActionID: %w", err) } + return oldValue.PreservationActionID, nil } -// ClearTasks clears the "tasks" edge to the PreservationTask entity. -func (m *PreservationActionMutation) ClearTasks() { - m.clearedtasks = true +// ResetPreservationActionID resets all changes to the "preservation_action_id" field. +func (m *PreservationTaskMutation) ResetPreservationActionID() { + m.action = nil } -// TasksCleared reports if the "tasks" edge to the PreservationTask entity was cleared. -func (m *PreservationActionMutation) TasksCleared() bool { - return m.clearedtasks +// SetActionID sets the "action" edge to the PreservationAction entity by id. +func (m *PreservationTaskMutation) SetActionID(id int) { + m.action = &id } -// RemoveTaskIDs removes the "tasks" edge to the PreservationTask entity by IDs. -func (m *PreservationActionMutation) RemoveTaskIDs(ids ...int) { - if m.removedtasks == nil { - m.removedtasks = make(map[int]struct{}) - } - for i := range ids { - delete(m.tasks, ids[i]) - m.removedtasks[ids[i]] = struct{}{} - } +// ClearAction clears the "action" edge to the PreservationAction entity. +func (m *PreservationTaskMutation) ClearAction() { + m.clearedaction = true + m.clearedFields[preservationtask.FieldPreservationActionID] = struct{}{} } -// RemovedTasks returns the removed IDs of the "tasks" edge to the PreservationTask entity. -func (m *PreservationActionMutation) RemovedTasksIDs() (ids []int) { - for id := range m.removedtasks { - ids = append(ids, id) +// ActionCleared reports if the "action" edge to the PreservationAction entity was cleared. +func (m *PreservationTaskMutation) ActionCleared() bool { + return m.clearedaction +} + +// ActionID returns the "action" edge ID in the mutation. 
+func (m *PreservationTaskMutation) ActionID() (id int, exists bool) { + if m.action != nil { + return *m.action, true } return } -// TasksIDs returns the "tasks" edge IDs in the mutation. -func (m *PreservationActionMutation) TasksIDs() (ids []int) { - for id := range m.tasks { - ids = append(ids, id) +// ActionIDs returns the "action" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// ActionID instead. It exists only for internal usage by the builders. +func (m *PreservationTaskMutation) ActionIDs() (ids []int) { + if id := m.action; id != nil { + ids = append(ids, *id) } return } -// ResetTasks resets all changes to the "tasks" edge. -func (m *PreservationActionMutation) ResetTasks() { - m.tasks = nil - m.clearedtasks = false - m.removedtasks = nil +// ResetAction resets all changes to the "action" edge. +func (m *PreservationTaskMutation) ResetAction() { + m.action = nil + m.clearedaction = false } -// Where appends a list predicates to the PreservationActionMutation builder. -func (m *PreservationActionMutation) Where(ps ...predicate.PreservationAction) { +// Where appends a list predicates to the PreservationTaskMutation builder. +func (m *PreservationTaskMutation) Where(ps ...predicate.PreservationTask) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the PreservationActionMutation builder. Using this method, +// WhereP appends storage-level predicates to the PreservationTaskMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *PreservationActionMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.PreservationAction, len(ps)) +func (m *PreservationTaskMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PreservationTask, len(ps)) for i := range ps { p[i] = ps[i] } @@ -1499,42 +1350,45 @@ func (m *PreservationActionMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *PreservationActionMutation) Op() Op { +func (m *PreservationTaskMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *PreservationActionMutation) SetOp(op Op) { +func (m *PreservationTaskMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (PreservationAction). -func (m *PreservationActionMutation) Type() string { +// Type returns the node type of this mutation (PreservationTask). +func (m *PreservationTaskMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *PreservationActionMutation) Fields() []string { - fields := make([]string, 0, 6) - if m.workflow_id != nil { - fields = append(fields, preservationaction.FieldWorkflowID) +func (m *PreservationTaskMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.task_id != nil { + fields = append(fields, preservationtask.FieldTaskID) } - if m._type != nil { - fields = append(fields, preservationaction.FieldType) + if m.name != nil { + fields = append(fields, preservationtask.FieldName) } if m.status != nil { - fields = append(fields, preservationaction.FieldStatus) + fields = append(fields, preservationtask.FieldStatus) } if m.started_at != nil { - fields = append(fields, preservationaction.FieldStartedAt) + fields = append(fields, preservationtask.FieldStartedAt) } if m.completed_at != nil { - fields = append(fields, preservationaction.FieldCompletedAt) + fields = append(fields, preservationtask.FieldCompletedAt) } - if m._package != nil { - fields = append(fields, preservationaction.FieldPackageID) + if m.note != nil { + fields = append(fields, preservationtask.FieldNote) + } + if m.action != nil { + fields = append(fields, preservationtask.FieldPreservationActionID) } return fields } @@ -1542,20 +1396,22 @@ func (m *PreservationActionMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *PreservationActionMutation) Field(name string) (ent.Value, bool) { +func (m *PreservationTaskMutation) Field(name string) (ent.Value, bool) { switch name { - case preservationaction.FieldWorkflowID: - return m.WorkflowID() - case preservationaction.FieldType: - return m.GetType() - case preservationaction.FieldStatus: + case preservationtask.FieldTaskID: + return m.TaskID() + case preservationtask.FieldName: + return m.Name() + case preservationtask.FieldStatus: return m.Status() - case preservationaction.FieldStartedAt: + case preservationtask.FieldStartedAt: return m.StartedAt() - case preservationaction.FieldCompletedAt: + case preservationtask.FieldCompletedAt: return m.CompletedAt() - case preservationaction.FieldPackageID: - return m.PackageID() + case preservationtask.FieldNote: + return m.Note() + case preservationtask.FieldPreservationActionID: + return m.PreservationActionID() } return nil, false } @@ -1563,84 +1419,90 @@ func (m *PreservationActionMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *PreservationActionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *PreservationTaskMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case preservationaction.FieldWorkflowID: - return m.OldWorkflowID(ctx) - case preservationaction.FieldType: - return m.OldType(ctx) - case preservationaction.FieldStatus: + case preservationtask.FieldTaskID: + return m.OldTaskID(ctx) + case preservationtask.FieldName: + return m.OldName(ctx) + case preservationtask.FieldStatus: return m.OldStatus(ctx) - case preservationaction.FieldStartedAt: + case preservationtask.FieldStartedAt: return m.OldStartedAt(ctx) - case preservationaction.FieldCompletedAt: + case preservationtask.FieldCompletedAt: return m.OldCompletedAt(ctx) - case preservationaction.FieldPackageID: - return m.OldPackageID(ctx) + case preservationtask.FieldNote: + return m.OldNote(ctx) + case preservationtask.FieldPreservationActionID: + return m.OldPreservationActionID(ctx) } - return nil, fmt.Errorf("unknown PreservationAction field %s", name) + return nil, fmt.Errorf("unknown PreservationTask field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PreservationActionMutation) SetField(name string, value ent.Value) error { +func (m *PreservationTaskMutation) SetField(name string, value ent.Value) error { switch name { - case preservationaction.FieldWorkflowID: - v, ok := value.(string) + case preservationtask.FieldTaskID: + v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetWorkflowID(v) + m.SetTaskID(v) return nil - case preservationaction.FieldType: - v, ok := value.(int8) + case preservationtask.FieldName: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetType(v) + m.SetName(v) return nil - case preservationaction.FieldStatus: + case preservationtask.FieldStatus: v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetStatus(v) return nil - case preservationaction.FieldStartedAt: + case preservationtask.FieldStartedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetStartedAt(v) return nil - case preservationaction.FieldCompletedAt: + case preservationtask.FieldCompletedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetCompletedAt(v) return nil - case preservationaction.FieldPackageID: + case preservationtask.FieldNote: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNote(v) + return nil + case preservationtask.FieldPreservationActionID: v, ok := value.(int) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPackageID(v) + m.SetPreservationActionID(v) return nil } - return fmt.Errorf("unknown PreservationAction field %s", name) + return fmt.Errorf("unknown PreservationTask field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. 
-func (m *PreservationActionMutation) AddedFields() []string { +func (m *PreservationTaskMutation) AddedFields() []string { var fields []string - if m.add_type != nil { - fields = append(fields, preservationaction.FieldType) - } if m.addstatus != nil { - fields = append(fields, preservationaction.FieldStatus) + fields = append(fields, preservationtask.FieldStatus) } return fields } @@ -1648,11 +1510,9 @@ func (m *PreservationActionMutation) AddedFields() []string { // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *PreservationActionMutation) AddedField(name string) (ent.Value, bool) { +func (m *PreservationTaskMutation) AddedField(name string) (ent.Value, bool) { switch name { - case preservationaction.FieldType: - return m.AddedType() - case preservationaction.FieldStatus: + case preservationtask.FieldStatus: return m.AddedStatus() } return nil, false @@ -1661,16 +1521,9 @@ func (m *PreservationActionMutation) AddedField(name string) (ent.Value, bool) { // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PreservationActionMutation) AddField(name string, value ent.Value) error { +func (m *PreservationTaskMutation) AddField(name string, value ent.Value) error { switch name { - case preservationaction.FieldType: - v, ok := value.(int8) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddType(v) - return nil - case preservationaction.FieldStatus: + case preservationtask.FieldStatus: v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) @@ -1678,203 +1531,182 @@ func (m *PreservationActionMutation) AddField(name string, value ent.Value) erro m.AddStatus(v) return nil } - return fmt.Errorf("unknown PreservationAction numeric field %s", name) + return fmt.Errorf("unknown PreservationTask numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *PreservationActionMutation) ClearedFields() []string { +func (m *PreservationTaskMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(preservationaction.FieldStartedAt) { - fields = append(fields, preservationaction.FieldStartedAt) + if m.FieldCleared(preservationtask.FieldStartedAt) { + fields = append(fields, preservationtask.FieldStartedAt) } - if m.FieldCleared(preservationaction.FieldCompletedAt) { - fields = append(fields, preservationaction.FieldCompletedAt) + if m.FieldCleared(preservationtask.FieldCompletedAt) { + fields = append(fields, preservationtask.FieldCompletedAt) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *PreservationActionMutation) FieldCleared(name string) bool { +func (m *PreservationTaskMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *PreservationActionMutation) ClearField(name string) error { +func (m *PreservationTaskMutation) ClearField(name string) error { switch name { - case preservationaction.FieldStartedAt: + case preservationtask.FieldStartedAt: m.ClearStartedAt() return nil - case preservationaction.FieldCompletedAt: + case preservationtask.FieldCompletedAt: m.ClearCompletedAt() return nil } - return fmt.Errorf("unknown PreservationAction nullable field %s", name) + return fmt.Errorf("unknown PreservationTask nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *PreservationActionMutation) ResetField(name string) error { +func (m *PreservationTaskMutation) ResetField(name string) error { switch name { - case preservationaction.FieldWorkflowID: - m.ResetWorkflowID() + case preservationtask.FieldTaskID: + m.ResetTaskID() return nil - case preservationaction.FieldType: - m.ResetType() + case preservationtask.FieldName: + m.ResetName() return nil - case preservationaction.FieldStatus: + case preservationtask.FieldStatus: m.ResetStatus() return nil - case preservationaction.FieldStartedAt: + case preservationtask.FieldStartedAt: m.ResetStartedAt() return nil - case preservationaction.FieldCompletedAt: + case preservationtask.FieldCompletedAt: m.ResetCompletedAt() return nil - case preservationaction.FieldPackageID: - m.ResetPackageID() + case preservationtask.FieldNote: + m.ResetNote() + return nil + case preservationtask.FieldPreservationActionID: + m.ResetPreservationActionID() return nil } - return fmt.Errorf("unknown PreservationAction field %s", name) + return fmt.Errorf("unknown PreservationTask field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *PreservationActionMutation) AddedEdges() []string { - edges := make([]string, 0, 2) - if m._package != nil { - edges = append(edges, preservationaction.EdgePackage) - } - if m.tasks != nil { - edges = append(edges, preservationaction.EdgeTasks) +func (m *PreservationTaskMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.action != nil { + edges = append(edges, preservationtask.EdgeAction) } return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *PreservationActionMutation) AddedIDs(name string) []ent.Value { +func (m *PreservationTaskMutation) AddedIDs(name string) []ent.Value { switch name { - case preservationaction.EdgePackage: - if id := m._package; id != nil { + case preservationtask.EdgeAction: + if id := m.action; id != nil { return []ent.Value{*id} } - case preservationaction.EdgeTasks: - ids := make([]ent.Value, 0, len(m.tasks)) - for id := range m.tasks { - ids = append(ids, id) - } - return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *PreservationActionMutation) RemovedEdges() []string { - edges := make([]string, 0, 2) - if m.removedtasks != nil { - edges = append(edges, preservationaction.EdgeTasks) - } +func (m *PreservationTaskMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. 
-func (m *PreservationActionMutation) RemovedIDs(name string) []ent.Value { - switch name { - case preservationaction.EdgeTasks: - ids := make([]ent.Value, 0, len(m.removedtasks)) - for id := range m.removedtasks { - ids = append(ids, id) - } - return ids - } +func (m *PreservationTaskMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *PreservationActionMutation) ClearedEdges() []string { - edges := make([]string, 0, 2) - if m.cleared_package { - edges = append(edges, preservationaction.EdgePackage) - } - if m.clearedtasks { - edges = append(edges, preservationaction.EdgeTasks) +func (m *PreservationTaskMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedaction { + edges = append(edges, preservationtask.EdgeAction) } return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *PreservationActionMutation) EdgeCleared(name string) bool { +func (m *PreservationTaskMutation) EdgeCleared(name string) bool { switch name { - case preservationaction.EdgePackage: - return m.cleared_package - case preservationaction.EdgeTasks: - return m.clearedtasks + case preservationtask.EdgeAction: + return m.clearedaction } return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *PreservationActionMutation) ClearEdge(name string) error { +func (m *PreservationTaskMutation) ClearEdge(name string) error { switch name { - case preservationaction.EdgePackage: - m.ClearPackage() + case preservationtask.EdgeAction: + m.ClearAction() return nil } - return fmt.Errorf("unknown PreservationAction unique edge %s", name) + return fmt.Errorf("unknown PreservationTask unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *PreservationActionMutation) ResetEdge(name string) error { +func (m *PreservationTaskMutation) ResetEdge(name string) error { switch name { - case preservationaction.EdgePackage: - m.ResetPackage() - return nil - case preservationaction.EdgeTasks: - m.ResetTasks() + case preservationtask.EdgeAction: + m.ResetAction() return nil } - return fmt.Errorf("unknown PreservationAction edge %s", name) + return fmt.Errorf("unknown PreservationTask edge %s", name) } -// PreservationTaskMutation represents an operation that mutates the PreservationTask nodes in the graph. -type PreservationTaskMutation struct { +// SIPMutation represents an operation that mutates the SIP nodes in the graph. 
+type SIPMutation struct { config - op Op - typ string - id *int - task_id *uuid.UUID - name *string - status *int8 - addstatus *int8 - started_at *time.Time - completed_at *time.Time - note *string - clearedFields map[string]struct{} - action *int - clearedaction bool - done bool - oldValue func(context.Context) (*PreservationTask, error) - predicates []predicate.PreservationTask + op Op + typ string + id *int + name *string + workflow_id *string + run_id *uuid.UUID + aip_id *uuid.UUID + location_id *uuid.UUID + status *int8 + addstatus *int8 + created_at *time.Time + started_at *time.Time + completed_at *time.Time + clearedFields map[string]struct{} + preservation_actions map[int]struct{} + removedpreservation_actions map[int]struct{} + clearedpreservation_actions bool + done bool + oldValue func(context.Context) (*SIP, error) + predicates []predicate.SIP } -var _ ent.Mutation = (*PreservationTaskMutation)(nil) +var _ ent.Mutation = (*SIPMutation)(nil) -// preservationtaskOption allows management of the mutation configuration using functional options. -type preservationtaskOption func(*PreservationTaskMutation) +// sipOption allows management of the mutation configuration using functional options. +type sipOption func(*SIPMutation) -// newPreservationTaskMutation creates new mutation for the PreservationTask entity. -func newPreservationTaskMutation(c config, op Op, opts ...preservationtaskOption) *PreservationTaskMutation { - m := &PreservationTaskMutation{ +// newSIPMutation creates new mutation for the SIP entity. +func newSIPMutation(c config, op Op, opts ...sipOption) *SIPMutation { + m := &SIPMutation{ config: c, op: op, - typ: TypePreservationTask, + typ: TypeSIP, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -1883,20 +1715,20 @@ func newPreservationTaskMutation(c config, op Op, opts ...preservationtaskOption return m } -// withPreservationTaskID sets the ID field of the mutation. -func withPreservationTaskID(id int) preservationtaskOption { - return func(m *PreservationTaskMutation) { +// withSIPID sets the ID field of the mutation. +func withSIPID(id int) sipOption { + return func(m *SIPMutation) { var ( err error once sync.Once - value *PreservationTask + value *SIP ) - m.oldValue = func(ctx context.Context) (*PreservationTask, error) { + m.oldValue = func(ctx context.Context) (*SIP, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().PreservationTask.Get(ctx, id) + value, err = m.Client().SIP.Get(ctx, id) } }) return value, err @@ -1905,143 +1737,277 @@ func withPreservationTaskID(id int) preservationtaskOption { } } -// withPreservationTask sets the old PreservationTask of the mutation. -func withPreservationTask(node *PreservationTask) preservationtaskOption { - return func(m *PreservationTaskMutation) { - m.oldValue = func(context.Context) (*PreservationTask, error) { +// withSIP sets the old SIP of the mutation. +func withSIP(node *SIP) sipOption { + return func(m *SIPMutation) { + m.oldValue = func(context.Context) (*SIP, error) { return node, nil } m.id = &node.ID } } -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m PreservationTaskMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client +// Client returns a new `ent.Client` from the mutation. 
If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m SIPMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m SIPMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *SIPMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *SIPMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().SIP.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetName sets the "name" field. +func (m *SIPMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *SIPMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SIPMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *SIPMutation) ResetName() { + m.name = nil +} + +// SetWorkflowID sets the "workflow_id" field. +func (m *SIPMutation) SetWorkflowID(s string) { + m.workflow_id = &s +} + +// WorkflowID returns the value of the "workflow_id" field in the mutation. +func (m *SIPMutation) WorkflowID() (r string, exists bool) { + v := m.workflow_id + if v == nil { + return + } + return *v, true +} + +// OldWorkflowID returns the old "workflow_id" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *SIPMutation) OldWorkflowID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWorkflowID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWorkflowID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWorkflowID: %w", err) + } + return oldValue.WorkflowID, nil +} + +// ResetWorkflowID resets all changes to the "workflow_id" field. +func (m *SIPMutation) ResetWorkflowID() { + m.workflow_id = nil } -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m PreservationTaskMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("db: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil +// SetRunID sets the "run_id" field. +func (m *SIPMutation) SetRunID(u uuid.UUID) { + m.run_id = &u } -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *PreservationTaskMutation) ID() (id int, exists bool) { - if m.id == nil { +// RunID returns the value of the "run_id" field in the mutation. +func (m *SIPMutation) RunID() (r uuid.UUID, exists bool) { + v := m.run_id + if v == nil { return } - return *m.id, true + return *v, true } -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *PreservationTaskMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().PreservationTask.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) +// OldRunID returns the old "run_id" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SIPMutation) OldRunID(ctx context.Context) (v uuid.UUID, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRunID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRunID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRunID: %w", err) } + return oldValue.RunID, nil } -// SetTaskID sets the "task_id" field. -func (m *PreservationTaskMutation) SetTaskID(u uuid.UUID) { - m.task_id = &u +// ResetRunID resets all changes to the "run_id" field. +func (m *SIPMutation) ResetRunID() { + m.run_id = nil } -// TaskID returns the value of the "task_id" field in the mutation. -func (m *PreservationTaskMutation) TaskID() (r uuid.UUID, exists bool) { - v := m.task_id +// SetAipID sets the "aip_id" field. +func (m *SIPMutation) SetAipID(u uuid.UUID) { + m.aip_id = &u +} + +// AipID returns the value of the "aip_id" field in the mutation. 
+func (m *SIPMutation) AipID() (r uuid.UUID, exists bool) { + v := m.aip_id if v == nil { return } return *v, true } -// OldTaskID returns the old "task_id" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// OldAipID returns the old "aip_id" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationTaskMutation) OldTaskID(ctx context.Context) (v uuid.UUID, err error) { +func (m *SIPMutation) OldAipID(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldTaskID is only allowed on UpdateOne operations") + return v, errors.New("OldAipID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldTaskID requires an ID field in the mutation") + return v, errors.New("OldAipID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldTaskID: %w", err) + return v, fmt.Errorf("querying old value for OldAipID: %w", err) } - return oldValue.TaskID, nil + return oldValue.AipID, nil } -// ResetTaskID resets all changes to the "task_id" field. -func (m *PreservationTaskMutation) ResetTaskID() { - m.task_id = nil +// ClearAipID clears the value of the "aip_id" field. +func (m *SIPMutation) ClearAipID() { + m.aip_id = nil + m.clearedFields[sip.FieldAipID] = struct{}{} } -// SetName sets the "name" field. -func (m *PreservationTaskMutation) SetName(s string) { - m.name = &s +// AipIDCleared returns if the "aip_id" field was cleared in this mutation. +func (m *SIPMutation) AipIDCleared() bool { + _, ok := m.clearedFields[sip.FieldAipID] + return ok } -// Name returns the value of the "name" field in the mutation. -func (m *PreservationTaskMutation) Name() (r string, exists bool) { - v := m.name +// ResetAipID resets all changes to the "aip_id" field. +func (m *SIPMutation) ResetAipID() { + m.aip_id = nil + delete(m.clearedFields, sip.FieldAipID) +} + +// SetLocationID sets the "location_id" field. +func (m *SIPMutation) SetLocationID(u uuid.UUID) { + m.location_id = &u +} + +// LocationID returns the value of the "location_id" field in the mutation. +func (m *SIPMutation) LocationID() (r uuid.UUID, exists bool) { + v := m.location_id if v == nil { return } return *v, true } -// OldName returns the old "name" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// OldLocationID returns the old "location_id" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PreservationTaskMutation) OldName(ctx context.Context) (v string, err error) { +func (m *SIPMutation) OldLocationID(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") + return v, errors.New("OldLocationID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") + return v, errors.New("OldLocationID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) + return v, fmt.Errorf("querying old value for OldLocationID: %w", err) } - return oldValue.Name, nil + return oldValue.LocationID, nil } -// ResetName resets all changes to the "name" field. -func (m *PreservationTaskMutation) ResetName() { - m.name = nil +// ClearLocationID clears the value of the "location_id" field. +func (m *SIPMutation) ClearLocationID() { + m.location_id = nil + m.clearedFields[sip.FieldLocationID] = struct{}{} +} + +// LocationIDCleared returns if the "location_id" field was cleared in this mutation. +func (m *SIPMutation) LocationIDCleared() bool { + _, ok := m.clearedFields[sip.FieldLocationID] + return ok +} + +// ResetLocationID resets all changes to the "location_id" field. +func (m *SIPMutation) ResetLocationID() { + m.location_id = nil + delete(m.clearedFields, sip.FieldLocationID) } // SetStatus sets the "status" field. -func (m *PreservationTaskMutation) SetStatus(i int8) { +func (m *SIPMutation) SetStatus(i int8) { m.status = &i m.addstatus = nil } // Status returns the value of the "status" field in the mutation. -func (m *PreservationTaskMutation) Status() (r int8, exists bool) { +func (m *SIPMutation) Status() (r int8, exists bool) { v := m.status if v == nil { return @@ -2049,10 +2015,10 @@ func (m *PreservationTaskMutation) Status() (r int8, exists bool) { return *v, true } -// OldStatus returns the old "status" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// OldStatus returns the old "status" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationTaskMutation) OldStatus(ctx context.Context) (v int8, err error) { +func (m *SIPMutation) OldStatus(ctx context.Context) (v int8, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStatus is only allowed on UpdateOne operations") } @@ -2067,7 +2033,7 @@ func (m *PreservationTaskMutation) OldStatus(ctx context.Context) (v int8, err e } // AddStatus adds i to the "status" field. -func (m *PreservationTaskMutation) AddStatus(i int8) { +func (m *SIPMutation) AddStatus(i int8) { if m.addstatus != nil { *m.addstatus += i } else { @@ -2076,7 +2042,7 @@ func (m *PreservationTaskMutation) AddStatus(i int8) { } // AddedStatus returns the value that was added to the "status" field in this mutation. -func (m *PreservationTaskMutation) AddedStatus() (r int8, exists bool) { +func (m *SIPMutation) AddedStatus() (r int8, exists bool) { v := m.addstatus if v == nil { return @@ -2085,18 +2051,54 @@ func (m *PreservationTaskMutation) AddedStatus() (r int8, exists bool) { } // ResetStatus resets all changes to the "status" field. 
-func (m *PreservationTaskMutation) ResetStatus() { +func (m *SIPMutation) ResetStatus() { m.status = nil m.addstatus = nil } +// SetCreatedAt sets the "created_at" field. +func (m *SIPMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *SIPMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SIPMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *SIPMutation) ResetCreatedAt() { + m.created_at = nil +} + // SetStartedAt sets the "started_at" field. -func (m *PreservationTaskMutation) SetStartedAt(t time.Time) { +func (m *SIPMutation) SetStartedAt(t time.Time) { m.started_at = &t } // StartedAt returns the value of the "started_at" field in the mutation. -func (m *PreservationTaskMutation) StartedAt() (r time.Time, exists bool) { +func (m *SIPMutation) StartedAt() (r time.Time, exists bool) { v := m.started_at if v == nil { return @@ -2104,10 +2106,10 @@ func (m *PreservationTaskMutation) StartedAt() (r time.Time, exists bool) { return *v, true } -// OldStartedAt returns the old "started_at" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// OldStartedAt returns the old "started_at" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationTaskMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { +func (m *SIPMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") } @@ -2122,30 +2124,30 @@ func (m *PreservationTaskMutation) OldStartedAt(ctx context.Context) (v time.Tim } // ClearStartedAt clears the value of the "started_at" field. -func (m *PreservationTaskMutation) ClearStartedAt() { +func (m *SIPMutation) ClearStartedAt() { m.started_at = nil - m.clearedFields[preservationtask.FieldStartedAt] = struct{}{} + m.clearedFields[sip.FieldStartedAt] = struct{}{} } // StartedAtCleared returns if the "started_at" field was cleared in this mutation. -func (m *PreservationTaskMutation) StartedAtCleared() bool { - _, ok := m.clearedFields[preservationtask.FieldStartedAt] +func (m *SIPMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[sip.FieldStartedAt] return ok } // ResetStartedAt resets all changes to the "started_at" field. 
-func (m *PreservationTaskMutation) ResetStartedAt() { +func (m *SIPMutation) ResetStartedAt() { m.started_at = nil - delete(m.clearedFields, preservationtask.FieldStartedAt) + delete(m.clearedFields, sip.FieldStartedAt) } // SetCompletedAt sets the "completed_at" field. -func (m *PreservationTaskMutation) SetCompletedAt(t time.Time) { +func (m *SIPMutation) SetCompletedAt(t time.Time) { m.completed_at = &t } // CompletedAt returns the value of the "completed_at" field in the mutation. -func (m *PreservationTaskMutation) CompletedAt() (r time.Time, exists bool) { +func (m *SIPMutation) CompletedAt() (r time.Time, exists bool) { v := m.completed_at if v == nil { return @@ -2153,10 +2155,10 @@ func (m *PreservationTaskMutation) CompletedAt() (r time.Time, exists bool) { return *v, true } -// OldCompletedAt returns the old "completed_at" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. +// OldCompletedAt returns the old "completed_at" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationTaskMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { +func (m *SIPMutation) OldCompletedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCompletedAt is only allowed on UpdateOne operations") } @@ -2171,144 +2173,86 @@ func (m *PreservationTaskMutation) OldCompletedAt(ctx context.Context) (v time.T } // ClearCompletedAt clears the value of the "completed_at" field. -func (m *PreservationTaskMutation) ClearCompletedAt() { +func (m *SIPMutation) ClearCompletedAt() { m.completed_at = nil - m.clearedFields[preservationtask.FieldCompletedAt] = struct{}{} + m.clearedFields[sip.FieldCompletedAt] = struct{}{} } // CompletedAtCleared returns if the "completed_at" field was cleared in this mutation. -func (m *PreservationTaskMutation) CompletedAtCleared() bool { - _, ok := m.clearedFields[preservationtask.FieldCompletedAt] +func (m *SIPMutation) CompletedAtCleared() bool { + _, ok := m.clearedFields[sip.FieldCompletedAt] return ok } // ResetCompletedAt resets all changes to the "completed_at" field. -func (m *PreservationTaskMutation) ResetCompletedAt() { +func (m *SIPMutation) ResetCompletedAt() { m.completed_at = nil - delete(m.clearedFields, preservationtask.FieldCompletedAt) -} - -// SetNote sets the "note" field. -func (m *PreservationTaskMutation) SetNote(s string) { - m.note = &s -} - -// Note returns the value of the "note" field in the mutation. -func (m *PreservationTaskMutation) Note() (r string, exists bool) { - v := m.note - if v == nil { - return - } - return *v, true -} - -// OldNote returns the old "note" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PreservationTaskMutation) OldNote(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldNote is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldNote requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldNote: %w", err) - } - return oldValue.Note, nil -} - -// ResetNote resets all changes to the "note" field. -func (m *PreservationTaskMutation) ResetNote() { - m.note = nil -} - -// SetPreservationActionID sets the "preservation_action_id" field. -func (m *PreservationTaskMutation) SetPreservationActionID(i int) { - m.action = &i -} - -// PreservationActionID returns the value of the "preservation_action_id" field in the mutation. -func (m *PreservationTaskMutation) PreservationActionID() (r int, exists bool) { - v := m.action - if v == nil { - return - } - return *v, true + delete(m.clearedFields, sip.FieldCompletedAt) } -// OldPreservationActionID returns the old "preservation_action_id" field's value of the PreservationTask entity. -// If the PreservationTask object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PreservationTaskMutation) OldPreservationActionID(ctx context.Context) (v int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPreservationActionID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPreservationActionID requires an ID field in the mutation") +// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by ids. +func (m *SIPMutation) AddPreservationActionIDs(ids ...int) { + if m.preservation_actions == nil { + m.preservation_actions = make(map[int]struct{}) } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPreservationActionID: %w", err) + for i := range ids { + m.preservation_actions[ids[i]] = struct{}{} } - return oldValue.PreservationActionID, nil -} - -// ResetPreservationActionID resets all changes to the "preservation_action_id" field. -func (m *PreservationTaskMutation) ResetPreservationActionID() { - m.action = nil } -// SetActionID sets the "action" edge to the PreservationAction entity by id. -func (m *PreservationTaskMutation) SetActionID(id int) { - m.action = &id +// ClearPreservationActions clears the "preservation_actions" edge to the PreservationAction entity. +func (m *SIPMutation) ClearPreservationActions() { + m.clearedpreservation_actions = true } -// ClearAction clears the "action" edge to the PreservationAction entity. -func (m *PreservationTaskMutation) ClearAction() { - m.clearedaction = true - m.clearedFields[preservationtask.FieldPreservationActionID] = struct{}{} +// PreservationActionsCleared reports if the "preservation_actions" edge to the PreservationAction entity was cleared. +func (m *SIPMutation) PreservationActionsCleared() bool { + return m.clearedpreservation_actions } -// ActionCleared reports if the "action" edge to the PreservationAction entity was cleared. -func (m *PreservationTaskMutation) ActionCleared() bool { - return m.clearedaction +// RemovePreservationActionIDs removes the "preservation_actions" edge to the PreservationAction entity by IDs. 
+func (m *SIPMutation) RemovePreservationActionIDs(ids ...int) { + if m.removedpreservation_actions == nil { + m.removedpreservation_actions = make(map[int]struct{}) + } + for i := range ids { + delete(m.preservation_actions, ids[i]) + m.removedpreservation_actions[ids[i]] = struct{}{} + } } -// ActionID returns the "action" edge ID in the mutation. -func (m *PreservationTaskMutation) ActionID() (id int, exists bool) { - if m.action != nil { - return *m.action, true +// RemovedPreservationActions returns the removed IDs of the "preservation_actions" edge to the PreservationAction entity. +func (m *SIPMutation) RemovedPreservationActionsIDs() (ids []int) { + for id := range m.removedpreservation_actions { + ids = append(ids, id) } return } -// ActionIDs returns the "action" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// ActionID instead. It exists only for internal usage by the builders. -func (m *PreservationTaskMutation) ActionIDs() (ids []int) { - if id := m.action; id != nil { - ids = append(ids, *id) +// PreservationActionsIDs returns the "preservation_actions" edge IDs in the mutation. +func (m *SIPMutation) PreservationActionsIDs() (ids []int) { + for id := range m.preservation_actions { + ids = append(ids, id) } return } -// ResetAction resets all changes to the "action" edge. -func (m *PreservationTaskMutation) ResetAction() { - m.action = nil - m.clearedaction = false +// ResetPreservationActions resets all changes to the "preservation_actions" edge. +func (m *SIPMutation) ResetPreservationActions() { + m.preservation_actions = nil + m.clearedpreservation_actions = false + m.removedpreservation_actions = nil } -// Where appends a list predicates to the PreservationTaskMutation builder. -func (m *PreservationTaskMutation) Where(ps ...predicate.PreservationTask) { +// Where appends a list predicates to the SIPMutation builder. +func (m *SIPMutation) Where(ps ...predicate.SIP) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the PreservationTaskMutation builder. Using this method, +// WhereP appends storage-level predicates to the SIPMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *PreservationTaskMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.PreservationTask, len(ps)) +func (m *SIPMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.SIP, len(ps)) for i := range ps { p[i] = ps[i] } @@ -2316,45 +2260,51 @@ func (m *PreservationTaskMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *PreservationTaskMutation) Op() Op { +func (m *SIPMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *PreservationTaskMutation) SetOp(op Op) { +func (m *SIPMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (PreservationTask). -func (m *PreservationTaskMutation) Type() string { +// Type returns the node type of this mutation (SIP). +func (m *SIPMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *PreservationTaskMutation) Fields() []string { - fields := make([]string, 0, 7) - if m.task_id != nil { - fields = append(fields, preservationtask.FieldTaskID) - } +func (m *SIPMutation) Fields() []string { + fields := make([]string, 0, 9) if m.name != nil { - fields = append(fields, preservationtask.FieldName) + fields = append(fields, sip.FieldName) + } + if m.workflow_id != nil { + fields = append(fields, sip.FieldWorkflowID) + } + if m.run_id != nil { + fields = append(fields, sip.FieldRunID) + } + if m.aip_id != nil { + fields = append(fields, sip.FieldAipID) + } + if m.location_id != nil { + fields = append(fields, sip.FieldLocationID) } if m.status != nil { - fields = append(fields, preservationtask.FieldStatus) + fields = append(fields, sip.FieldStatus) + } + if m.created_at != nil { + fields = append(fields, sip.FieldCreatedAt) } if m.started_at != nil { - fields = append(fields, preservationtask.FieldStartedAt) + fields = append(fields, sip.FieldStartedAt) } if m.completed_at != nil { - fields = append(fields, preservationtask.FieldCompletedAt) - } - if m.note != nil { - fields = append(fields, preservationtask.FieldNote) - } - if m.action != nil { - fields = append(fields, preservationtask.FieldPreservationActionID) + fields = append(fields, sip.FieldCompletedAt) } return fields } @@ -2362,22 +2312,26 @@ func (m *PreservationTaskMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *PreservationTaskMutation) Field(name string) (ent.Value, bool) { +func (m *SIPMutation) Field(name string) (ent.Value, bool) { switch name { - case preservationtask.FieldTaskID: - return m.TaskID() - case preservationtask.FieldName: + case sip.FieldName: return m.Name() - case preservationtask.FieldStatus: + case sip.FieldWorkflowID: + return m.WorkflowID() + case sip.FieldRunID: + return m.RunID() + case sip.FieldAipID: + return m.AipID() + case sip.FieldLocationID: + return m.LocationID() + case sip.FieldStatus: return m.Status() - case preservationtask.FieldStartedAt: + case sip.FieldCreatedAt: + return m.CreatedAt() + case sip.FieldStartedAt: return m.StartedAt() - case preservationtask.FieldCompletedAt: + case sip.FieldCompletedAt: return m.CompletedAt() - case preservationtask.FieldNote: - return m.Note() - case preservationtask.FieldPreservationActionID: - return m.PreservationActionID() } return nil, false } @@ -2385,90 +2339,108 @@ func (m *PreservationTaskMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *PreservationTaskMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *SIPMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case preservationtask.FieldTaskID: - return m.OldTaskID(ctx) - case preservationtask.FieldName: + case sip.FieldName: return m.OldName(ctx) - case preservationtask.FieldStatus: + case sip.FieldWorkflowID: + return m.OldWorkflowID(ctx) + case sip.FieldRunID: + return m.OldRunID(ctx) + case sip.FieldAipID: + return m.OldAipID(ctx) + case sip.FieldLocationID: + return m.OldLocationID(ctx) + case sip.FieldStatus: return m.OldStatus(ctx) - case preservationtask.FieldStartedAt: + case sip.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case sip.FieldStartedAt: return m.OldStartedAt(ctx) - case preservationtask.FieldCompletedAt: + case sip.FieldCompletedAt: return m.OldCompletedAt(ctx) - case preservationtask.FieldNote: - return m.OldNote(ctx) - case preservationtask.FieldPreservationActionID: - return m.OldPreservationActionID(ctx) } - return nil, fmt.Errorf("unknown PreservationTask field %s", name) + return nil, fmt.Errorf("unknown SIP field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PreservationTaskMutation) SetField(name string, value ent.Value) error { +func (m *SIPMutation) SetField(name string, value ent.Value) error { switch name { - case preservationtask.FieldTaskID: - v, ok := value.(uuid.UUID) + case sip.FieldName: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetTaskID(v) + m.SetName(v) return nil - case preservationtask.FieldName: + case sip.FieldWorkflowID: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetName(v) + m.SetWorkflowID(v) return nil - case preservationtask.FieldStatus: - v, ok := value.(int8) + case sip.FieldRunID: + v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetStatus(v) + m.SetRunID(v) return nil - case preservationtask.FieldStartedAt: - v, ok := value.(time.Time) + case sip.FieldAipID: + v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetStartedAt(v) + m.SetAipID(v) return nil - case preservationtask.FieldCompletedAt: + case sip.FieldLocationID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLocationID(v) + return nil + case sip.FieldStatus: + v, ok := value.(int8) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case sip.FieldCreatedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCompletedAt(v) + m.SetCreatedAt(v) return nil - case preservationtask.FieldNote: - v, ok := value.(string) + case sip.FieldStartedAt: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetNote(v) + m.SetStartedAt(v) return nil - case preservationtask.FieldPreservationActionID: - v, ok := value.(int) + case sip.FieldCompletedAt: + v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPreservationActionID(v) + m.SetCompletedAt(v) return nil } - return fmt.Errorf("unknown PreservationTask 
field %s", name) + return fmt.Errorf("unknown SIP field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *PreservationTaskMutation) AddedFields() []string { +func (m *SIPMutation) AddedFields() []string { var fields []string if m.addstatus != nil { - fields = append(fields, preservationtask.FieldStatus) + fields = append(fields, sip.FieldStatus) } return fields } @@ -2476,9 +2448,9 @@ func (m *PreservationTaskMutation) AddedFields() []string { // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *PreservationTaskMutation) AddedField(name string) (ent.Value, bool) { +func (m *SIPMutation) AddedField(name string) (ent.Value, bool) { switch name { - case preservationtask.FieldStatus: + case sip.FieldStatus: return m.AddedStatus() } return nil, false @@ -2487,9 +2459,9 @@ func (m *PreservationTaskMutation) AddedField(name string) (ent.Value, bool) { // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PreservationTaskMutation) AddField(name string, value ent.Value) error { +func (m *SIPMutation) AddField(name string, value ent.Value) error { switch name { - case preservationtask.FieldStatus: + case sip.FieldStatus: v, ok := value.(int8) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) @@ -2497,142 +2469,170 @@ func (m *PreservationTaskMutation) AddField(name string, value ent.Value) error m.AddStatus(v) return nil } - return fmt.Errorf("unknown PreservationTask numeric field %s", name) + return fmt.Errorf("unknown SIP numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *PreservationTaskMutation) ClearedFields() []string { +func (m *SIPMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(preservationtask.FieldStartedAt) { - fields = append(fields, preservationtask.FieldStartedAt) + if m.FieldCleared(sip.FieldAipID) { + fields = append(fields, sip.FieldAipID) } - if m.FieldCleared(preservationtask.FieldCompletedAt) { - fields = append(fields, preservationtask.FieldCompletedAt) + if m.FieldCleared(sip.FieldLocationID) { + fields = append(fields, sip.FieldLocationID) + } + if m.FieldCleared(sip.FieldStartedAt) { + fields = append(fields, sip.FieldStartedAt) + } + if m.FieldCleared(sip.FieldCompletedAt) { + fields = append(fields, sip.FieldCompletedAt) } return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *PreservationTaskMutation) FieldCleared(name string) bool { +func (m *SIPMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *PreservationTaskMutation) ClearField(name string) error { +func (m *SIPMutation) ClearField(name string) error { switch name { - case preservationtask.FieldStartedAt: + case sip.FieldAipID: + m.ClearAipID() + return nil + case sip.FieldLocationID: + m.ClearLocationID() + return nil + case sip.FieldStartedAt: m.ClearStartedAt() return nil - case preservationtask.FieldCompletedAt: + case sip.FieldCompletedAt: m.ClearCompletedAt() return nil } - return fmt.Errorf("unknown PreservationTask nullable field %s", name) + return fmt.Errorf("unknown SIP nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *PreservationTaskMutation) ResetField(name string) error { +func (m *SIPMutation) ResetField(name string) error { switch name { - case preservationtask.FieldTaskID: - m.ResetTaskID() - return nil - case preservationtask.FieldName: + case sip.FieldName: m.ResetName() return nil - case preservationtask.FieldStatus: + case sip.FieldWorkflowID: + m.ResetWorkflowID() + return nil + case sip.FieldRunID: + m.ResetRunID() + return nil + case sip.FieldAipID: + m.ResetAipID() + return nil + case sip.FieldLocationID: + m.ResetLocationID() + return nil + case sip.FieldStatus: m.ResetStatus() return nil - case preservationtask.FieldStartedAt: + case sip.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case sip.FieldStartedAt: m.ResetStartedAt() return nil - case preservationtask.FieldCompletedAt: + case sip.FieldCompletedAt: m.ResetCompletedAt() return nil - case preservationtask.FieldNote: - m.ResetNote() - return nil - case preservationtask.FieldPreservationActionID: - m.ResetPreservationActionID() - return nil } - return fmt.Errorf("unknown PreservationTask field %s", name) + return fmt.Errorf("unknown SIP field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *PreservationTaskMutation) AddedEdges() []string { +func (m *SIPMutation) AddedEdges() []string { edges := make([]string, 0, 1) - if m.action != nil { - edges = append(edges, preservationtask.EdgeAction) + if m.preservation_actions != nil { + edges = append(edges, sip.EdgePreservationActions) } return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *PreservationTaskMutation) AddedIDs(name string) []ent.Value { +func (m *SIPMutation) AddedIDs(name string) []ent.Value { switch name { - case preservationtask.EdgeAction: - if id := m.action; id != nil { - return []ent.Value{*id} + case sip.EdgePreservationActions: + ids := make([]ent.Value, 0, len(m.preservation_actions)) + for id := range m.preservation_actions { + ids = append(ids, id) } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *PreservationTaskMutation) RemovedEdges() []string { +func (m *SIPMutation) RemovedEdges() []string { edges := make([]string, 0, 1) + if m.removedpreservation_actions != nil { + edges = append(edges, sip.EdgePreservationActions) + } return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. 
-func (m *PreservationTaskMutation) RemovedIDs(name string) []ent.Value { +func (m *SIPMutation) RemovedIDs(name string) []ent.Value { + switch name { + case sip.EdgePreservationActions: + ids := make([]ent.Value, 0, len(m.removedpreservation_actions)) + for id := range m.removedpreservation_actions { + ids = append(ids, id) + } + return ids + } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *PreservationTaskMutation) ClearedEdges() []string { +func (m *SIPMutation) ClearedEdges() []string { edges := make([]string, 0, 1) - if m.clearedaction { - edges = append(edges, preservationtask.EdgeAction) + if m.clearedpreservation_actions { + edges = append(edges, sip.EdgePreservationActions) } return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *PreservationTaskMutation) EdgeCleared(name string) bool { +func (m *SIPMutation) EdgeCleared(name string) bool { switch name { - case preservationtask.EdgeAction: - return m.clearedaction + case sip.EdgePreservationActions: + return m.clearedpreservation_actions } return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *PreservationTaskMutation) ClearEdge(name string) error { +func (m *SIPMutation) ClearEdge(name string) error { switch name { - case preservationtask.EdgeAction: - m.ClearAction() - return nil } - return fmt.Errorf("unknown PreservationTask unique edge %s", name) + return fmt.Errorf("unknown SIP unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *PreservationTaskMutation) ResetEdge(name string) error { +func (m *SIPMutation) ResetEdge(name string) error { switch name { - case preservationtask.EdgeAction: - m.ResetAction() + case sip.EdgePreservationActions: + m.ResetPreservationActions() return nil } - return fmt.Errorf("unknown PreservationTask edge %s", name) + return fmt.Errorf("unknown SIP edge %s", name) } diff --git a/internal/persistence/ent/db/pkg/where.go b/internal/persistence/ent/db/pkg/where.go deleted file mode 100644 index c0c4d3459..000000000 --- a/internal/persistence/ent/db/pkg/where.go +++ /dev/null @@ -1,590 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package pkg - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" - "github.com/google/uuid" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. -func IDIn(ids ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. 
-func IDGT(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldID, id)) -} - -// Name applies equality check predicate on the "name" field. It's identical to NameEQ. -func Name(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldName, v)) -} - -// WorkflowID applies equality check predicate on the "workflow_id" field. It's identical to WorkflowIDEQ. -func WorkflowID(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldWorkflowID, v)) -} - -// RunID applies equality check predicate on the "run_id" field. It's identical to RunIDEQ. -func RunID(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldRunID, v)) -} - -// AipID applies equality check predicate on the "aip_id" field. It's identical to AipIDEQ. -func AipID(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldAipID, v)) -} - -// LocationID applies equality check predicate on the "location_id" field. It's identical to LocationIDEQ. -func LocationID(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldLocationID, v)) -} - -// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. -func Status(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldStatus, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCreatedAt, v)) -} - -// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. -func StartedAt(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldStartedAt, v)) -} - -// CompletedAt applies equality check predicate on the "completed_at" field. It's identical to CompletedAtEQ. -func CompletedAt(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCompletedAt, v)) -} - -// NameEQ applies the EQ predicate on the "name" field. -func NameEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldName, v)) -} - -// NameNEQ applies the NEQ predicate on the "name" field. -func NameNEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldName, v)) -} - -// NameIn applies the In predicate on the "name" field. -func NameIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldName, vs...)) -} - -// NameNotIn applies the NotIn predicate on the "name" field. -func NameNotIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldName, vs...)) -} - -// NameGT applies the GT predicate on the "name" field. -func NameGT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldName, v)) -} - -// NameGTE applies the GTE predicate on the "name" field. -func NameGTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldName, v)) -} - -// NameLT applies the LT predicate on the "name" field. -func NameLT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldName, v)) -} - -// NameLTE applies the LTE predicate on the "name" field. 
-func NameLTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldName, v)) -} - -// NameContains applies the Contains predicate on the "name" field. -func NameContains(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContains(FieldName, v)) -} - -// NameHasPrefix applies the HasPrefix predicate on the "name" field. -func NameHasPrefix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasPrefix(FieldName, v)) -} - -// NameHasSuffix applies the HasSuffix predicate on the "name" field. -func NameHasSuffix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasSuffix(FieldName, v)) -} - -// NameEqualFold applies the EqualFold predicate on the "name" field. -func NameEqualFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEqualFold(FieldName, v)) -} - -// NameContainsFold applies the ContainsFold predicate on the "name" field. -func NameContainsFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContainsFold(FieldName, v)) -} - -// WorkflowIDEQ applies the EQ predicate on the "workflow_id" field. -func WorkflowIDEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldWorkflowID, v)) -} - -// WorkflowIDNEQ applies the NEQ predicate on the "workflow_id" field. -func WorkflowIDNEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldWorkflowID, v)) -} - -// WorkflowIDIn applies the In predicate on the "workflow_id" field. -func WorkflowIDIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldWorkflowID, vs...)) -} - -// WorkflowIDNotIn applies the NotIn predicate on the "workflow_id" field. -func WorkflowIDNotIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldWorkflowID, vs...)) -} - -// WorkflowIDGT applies the GT predicate on the "workflow_id" field. -func WorkflowIDGT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldWorkflowID, v)) -} - -// WorkflowIDGTE applies the GTE predicate on the "workflow_id" field. -func WorkflowIDGTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldWorkflowID, v)) -} - -// WorkflowIDLT applies the LT predicate on the "workflow_id" field. -func WorkflowIDLT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldWorkflowID, v)) -} - -// WorkflowIDLTE applies the LTE predicate on the "workflow_id" field. -func WorkflowIDLTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldWorkflowID, v)) -} - -// WorkflowIDContains applies the Contains predicate on the "workflow_id" field. -func WorkflowIDContains(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContains(FieldWorkflowID, v)) -} - -// WorkflowIDHasPrefix applies the HasPrefix predicate on the "workflow_id" field. -func WorkflowIDHasPrefix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasPrefix(FieldWorkflowID, v)) -} - -// WorkflowIDHasSuffix applies the HasSuffix predicate on the "workflow_id" field. -func WorkflowIDHasSuffix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasSuffix(FieldWorkflowID, v)) -} - -// WorkflowIDEqualFold applies the EqualFold predicate on the "workflow_id" field. -func WorkflowIDEqualFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEqualFold(FieldWorkflowID, v)) -} - -// WorkflowIDContainsFold applies the ContainsFold predicate on the "workflow_id" field. -func WorkflowIDContainsFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContainsFold(FieldWorkflowID, v)) -} - -// RunIDEQ applies the EQ predicate on the "run_id" field. 
-func RunIDEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldRunID, v)) -} - -// RunIDNEQ applies the NEQ predicate on the "run_id" field. -func RunIDNEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldRunID, v)) -} - -// RunIDIn applies the In predicate on the "run_id" field. -func RunIDIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldRunID, vs...)) -} - -// RunIDNotIn applies the NotIn predicate on the "run_id" field. -func RunIDNotIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldRunID, vs...)) -} - -// RunIDGT applies the GT predicate on the "run_id" field. -func RunIDGT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldRunID, v)) -} - -// RunIDGTE applies the GTE predicate on the "run_id" field. -func RunIDGTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldRunID, v)) -} - -// RunIDLT applies the LT predicate on the "run_id" field. -func RunIDLT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldRunID, v)) -} - -// RunIDLTE applies the LTE predicate on the "run_id" field. -func RunIDLTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldRunID, v)) -} - -// AipIDEQ applies the EQ predicate on the "aip_id" field. -func AipIDEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldAipID, v)) -} - -// AipIDNEQ applies the NEQ predicate on the "aip_id" field. -func AipIDNEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldAipID, v)) -} - -// AipIDIn applies the In predicate on the "aip_id" field. -func AipIDIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldAipID, vs...)) -} - -// AipIDNotIn applies the NotIn predicate on the "aip_id" field. -func AipIDNotIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldAipID, vs...)) -} - -// AipIDGT applies the GT predicate on the "aip_id" field. -func AipIDGT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldAipID, v)) -} - -// AipIDGTE applies the GTE predicate on the "aip_id" field. -func AipIDGTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldAipID, v)) -} - -// AipIDLT applies the LT predicate on the "aip_id" field. -func AipIDLT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldAipID, v)) -} - -// AipIDLTE applies the LTE predicate on the "aip_id" field. -func AipIDLTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldAipID, v)) -} - -// AipIDIsNil applies the IsNil predicate on the "aip_id" field. -func AipIDIsNil() predicate.Pkg { - return predicate.Pkg(sql.FieldIsNull(FieldAipID)) -} - -// AipIDNotNil applies the NotNil predicate on the "aip_id" field. -func AipIDNotNil() predicate.Pkg { - return predicate.Pkg(sql.FieldNotNull(FieldAipID)) -} - -// LocationIDEQ applies the EQ predicate on the "location_id" field. -func LocationIDEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldLocationID, v)) -} - -// LocationIDNEQ applies the NEQ predicate on the "location_id" field. -func LocationIDNEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldLocationID, v)) -} - -// LocationIDIn applies the In predicate on the "location_id" field. -func LocationIDIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldLocationID, vs...)) -} - -// LocationIDNotIn applies the NotIn predicate on the "location_id" field. 
-func LocationIDNotIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldLocationID, vs...)) -} - -// LocationIDGT applies the GT predicate on the "location_id" field. -func LocationIDGT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldLocationID, v)) -} - -// LocationIDGTE applies the GTE predicate on the "location_id" field. -func LocationIDGTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldLocationID, v)) -} - -// LocationIDLT applies the LT predicate on the "location_id" field. -func LocationIDLT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldLocationID, v)) -} - -// LocationIDLTE applies the LTE predicate on the "location_id" field. -func LocationIDLTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldLocationID, v)) -} - -// LocationIDIsNil applies the IsNil predicate on the "location_id" field. -func LocationIDIsNil() predicate.Pkg { - return predicate.Pkg(sql.FieldIsNull(FieldLocationID)) -} - -// LocationIDNotNil applies the NotNil predicate on the "location_id" field. -func LocationIDNotNil() predicate.Pkg { - return predicate.Pkg(sql.FieldNotNull(FieldLocationID)) -} - -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...int8) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...int8) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldStatus, vs...)) -} - -// StatusGT applies the GT predicate on the "status" field. -func StatusGT(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldStatus, v)) -} - -// StatusGTE applies the GTE predicate on the "status" field. -func StatusGTE(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldStatus, v)) -} - -// StatusLT applies the LT predicate on the "status" field. -func StatusLT(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldStatus, v)) -} - -// StatusLTE applies the LTE predicate on the "status" field. -func StatusLTE(v int8) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldStatus, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. -func CreatedAtNotIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
-func CreatedAtGTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldCreatedAt, v)) -} - -// StartedAtEQ applies the EQ predicate on the "started_at" field. -func StartedAtEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldStartedAt, v)) -} - -// StartedAtNEQ applies the NEQ predicate on the "started_at" field. -func StartedAtNEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldStartedAt, v)) -} - -// StartedAtIn applies the In predicate on the "started_at" field. -func StartedAtIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldStartedAt, vs...)) -} - -// StartedAtNotIn applies the NotIn predicate on the "started_at" field. -func StartedAtNotIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldStartedAt, vs...)) -} - -// StartedAtGT applies the GT predicate on the "started_at" field. -func StartedAtGT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldStartedAt, v)) -} - -// StartedAtGTE applies the GTE predicate on the "started_at" field. -func StartedAtGTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldStartedAt, v)) -} - -// StartedAtLT applies the LT predicate on the "started_at" field. -func StartedAtLT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldStartedAt, v)) -} - -// StartedAtLTE applies the LTE predicate on the "started_at" field. -func StartedAtLTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldStartedAt, v)) -} - -// StartedAtIsNil applies the IsNil predicate on the "started_at" field. -func StartedAtIsNil() predicate.Pkg { - return predicate.Pkg(sql.FieldIsNull(FieldStartedAt)) -} - -// StartedAtNotNil applies the NotNil predicate on the "started_at" field. -func StartedAtNotNil() predicate.Pkg { - return predicate.Pkg(sql.FieldNotNull(FieldStartedAt)) -} - -// CompletedAtEQ applies the EQ predicate on the "completed_at" field. -func CompletedAtEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCompletedAt, v)) -} - -// CompletedAtNEQ applies the NEQ predicate on the "completed_at" field. -func CompletedAtNEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldCompletedAt, v)) -} - -// CompletedAtIn applies the In predicate on the "completed_at" field. -func CompletedAtIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldCompletedAt, vs...)) -} - -// CompletedAtNotIn applies the NotIn predicate on the "completed_at" field. -func CompletedAtNotIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldCompletedAt, vs...)) -} - -// CompletedAtGT applies the GT predicate on the "completed_at" field. -func CompletedAtGT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldCompletedAt, v)) -} - -// CompletedAtGTE applies the GTE predicate on the "completed_at" field. -func CompletedAtGTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldCompletedAt, v)) -} - -// CompletedAtLT applies the LT predicate on the "completed_at" field. 
-func CompletedAtLT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldCompletedAt, v)) -} - -// CompletedAtLTE applies the LTE predicate on the "completed_at" field. -func CompletedAtLTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldCompletedAt, v)) -} - -// CompletedAtIsNil applies the IsNil predicate on the "completed_at" field. -func CompletedAtIsNil() predicate.Pkg { - return predicate.Pkg(sql.FieldIsNull(FieldCompletedAt)) -} - -// CompletedAtNotNil applies the NotNil predicate on the "completed_at" field. -func CompletedAtNotNil() predicate.Pkg { - return predicate.Pkg(sql.FieldNotNull(FieldCompletedAt)) -} - -// HasPreservationActions applies the HasEdge predicate on the "preservation_actions" edge. -func HasPreservationActions() predicate.Pkg { - return predicate.Pkg(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, PreservationActionsTable, PreservationActionsColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasPreservationActionsWith applies the HasEdge predicate on the "preservation_actions" edge with a given conditions (other predicates). -func HasPreservationActionsWith(preds ...predicate.PreservationAction) predicate.Pkg { - return predicate.Pkg(func(s *sql.Selector) { - step := newPreservationActionsStep() - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. -func Not(p predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.NotPredicates(p)) -} diff --git a/internal/persistence/ent/db/pkg_create.go b/internal/persistence/ent/db/pkg_create.go deleted file mode 100644 index af289e1db..000000000 --- a/internal/persistence/ent/db/pkg_create.go +++ /dev/null @@ -1,359 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" - "github.com/google/uuid" -) - -// PkgCreate is the builder for creating a Pkg entity. -type PkgCreate struct { - config - mutation *PkgMutation - hooks []Hook -} - -// SetName sets the "name" field. -func (pc *PkgCreate) SetName(s string) *PkgCreate { - pc.mutation.SetName(s) - return pc -} - -// SetWorkflowID sets the "workflow_id" field. -func (pc *PkgCreate) SetWorkflowID(s string) *PkgCreate { - pc.mutation.SetWorkflowID(s) - return pc -} - -// SetRunID sets the "run_id" field. -func (pc *PkgCreate) SetRunID(u uuid.UUID) *PkgCreate { - pc.mutation.SetRunID(u) - return pc -} - -// SetAipID sets the "aip_id" field. -func (pc *PkgCreate) SetAipID(u uuid.UUID) *PkgCreate { - pc.mutation.SetAipID(u) - return pc -} - -// SetNillableAipID sets the "aip_id" field if the given value is not nil. -func (pc *PkgCreate) SetNillableAipID(u *uuid.UUID) *PkgCreate { - if u != nil { - pc.SetAipID(*u) - } - return pc -} - -// SetLocationID sets the "location_id" field. 
-func (pc *PkgCreate) SetLocationID(u uuid.UUID) *PkgCreate { - pc.mutation.SetLocationID(u) - return pc -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. -func (pc *PkgCreate) SetNillableLocationID(u *uuid.UUID) *PkgCreate { - if u != nil { - pc.SetLocationID(*u) - } - return pc -} - -// SetStatus sets the "status" field. -func (pc *PkgCreate) SetStatus(i int8) *PkgCreate { - pc.mutation.SetStatus(i) - return pc -} - -// SetCreatedAt sets the "created_at" field. -func (pc *PkgCreate) SetCreatedAt(t time.Time) *PkgCreate { - pc.mutation.SetCreatedAt(t) - return pc -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (pc *PkgCreate) SetNillableCreatedAt(t *time.Time) *PkgCreate { - if t != nil { - pc.SetCreatedAt(*t) - } - return pc -} - -// SetStartedAt sets the "started_at" field. -func (pc *PkgCreate) SetStartedAt(t time.Time) *PkgCreate { - pc.mutation.SetStartedAt(t) - return pc -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (pc *PkgCreate) SetNillableStartedAt(t *time.Time) *PkgCreate { - if t != nil { - pc.SetStartedAt(*t) - } - return pc -} - -// SetCompletedAt sets the "completed_at" field. -func (pc *PkgCreate) SetCompletedAt(t time.Time) *PkgCreate { - pc.mutation.SetCompletedAt(t) - return pc -} - -// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. -func (pc *PkgCreate) SetNillableCompletedAt(t *time.Time) *PkgCreate { - if t != nil { - pc.SetCompletedAt(*t) - } - return pc -} - -// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. -func (pc *PkgCreate) AddPreservationActionIDs(ids ...int) *PkgCreate { - pc.mutation.AddPreservationActionIDs(ids...) - return pc -} - -// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. -func (pc *PkgCreate) AddPreservationActions(p ...*PreservationAction) *PkgCreate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID - } - return pc.AddPreservationActionIDs(ids...) -} - -// Mutation returns the PkgMutation object of the builder. -func (pc *PkgCreate) Mutation() *PkgMutation { - return pc.mutation -} - -// Save creates the Pkg in the database. -func (pc *PkgCreate) Save(ctx context.Context) (*Pkg, error) { - pc.defaults() - return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (pc *PkgCreate) SaveX(ctx context.Context) *Pkg { - v, err := pc.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (pc *PkgCreate) Exec(ctx context.Context) error { - _, err := pc.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pc *PkgCreate) ExecX(ctx context.Context) { - if err := pc.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (pc *PkgCreate) defaults() { - if _, ok := pc.mutation.CreatedAt(); !ok { - v := pkg.DefaultCreatedAt() - pc.mutation.SetCreatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (pc *PkgCreate) check() error { - if _, ok := pc.mutation.Name(); !ok { - return &ValidationError{Name: "name", err: errors.New(`db: missing required field "Pkg.name"`)} - } - if _, ok := pc.mutation.WorkflowID(); !ok { - return &ValidationError{Name: "workflow_id", err: errors.New(`db: missing required field "Pkg.workflow_id"`)} - } - if _, ok := pc.mutation.RunID(); !ok { - return &ValidationError{Name: "run_id", err: errors.New(`db: missing required field "Pkg.run_id"`)} - } - if _, ok := pc.mutation.Status(); !ok { - return &ValidationError{Name: "status", err: errors.New(`db: missing required field "Pkg.status"`)} - } - if _, ok := pc.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "Pkg.created_at"`)} - } - return nil -} - -func (pc *PkgCreate) sqlSave(ctx context.Context) (*Pkg, error) { - if err := pc.check(); err != nil { - return nil, err - } - _node, _spec := pc.createSpec() - if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - pc.mutation.id = &_node.ID - pc.mutation.done = true - return _node, nil -} - -func (pc *PkgCreate) createSpec() (*Pkg, *sqlgraph.CreateSpec) { - var ( - _node = &Pkg{config: pc.config} - _spec = sqlgraph.NewCreateSpec(pkg.Table, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - ) - if value, ok := pc.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - _node.Name = value - } - if value, ok := pc.mutation.WorkflowID(); ok { - _spec.SetField(pkg.FieldWorkflowID, field.TypeString, value) - _node.WorkflowID = value - } - if value, ok := pc.mutation.RunID(); ok { - _spec.SetField(pkg.FieldRunID, field.TypeUUID, value) - _node.RunID = value - } - if value, ok := pc.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - _node.AipID = value - } - if value, ok := pc.mutation.LocationID(); ok { - _spec.SetField(pkg.FieldLocationID, field.TypeUUID, value) - _node.LocationID = value - } - if value, ok := pc.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeInt8, value) - _node.Status = value - } - if value, ok := pc.mutation.CreatedAt(); ok { - _spec.SetField(pkg.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if value, ok := pc.mutation.StartedAt(); ok { - _spec.SetField(pkg.FieldStartedAt, field.TypeTime, value) - _node.StartedAt = value - } - if value, ok := pc.mutation.CompletedAt(); ok { - _spec.SetField(pkg.FieldCompletedAt, field.TypeTime, value) - _node.CompletedAt = value - } - if nodes := pc.mutation.PreservationActionsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges = append(_spec.Edges, edge) - } - return _node, _spec -} - -// PkgCreateBulk is the builder for creating many Pkg entities in bulk. -type PkgCreateBulk struct { - config - err error - builders []*PkgCreate -} - -// Save creates the Pkg entities in the database. 
-func (pcb *PkgCreateBulk) Save(ctx context.Context) ([]*Pkg, error) { - if pcb.err != nil { - return nil, pcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) - nodes := make([]*Pkg, len(pcb.builders)) - mutators := make([]Mutator, len(pcb.builders)) - for i := range pcb.builders { - func(i int, root context.Context) { - builder := pcb.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PkgMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (pcb *PkgCreateBulk) SaveX(ctx context.Context) []*Pkg { - v, err := pcb.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (pcb *PkgCreateBulk) Exec(ctx context.Context) error { - _, err := pcb.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pcb *PkgCreateBulk) ExecX(ctx context.Context) { - if err := pcb.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/internal/persistence/ent/db/pkg_delete.go b/internal/persistence/ent/db/pkg_delete.go deleted file mode 100644 index 554e3a33f..000000000 --- a/internal/persistence/ent/db/pkg_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" -) - -// PkgDelete is the builder for deleting a Pkg entity. -type PkgDelete struct { - config - hooks []Hook - mutation *PkgMutation -} - -// Where appends a list predicates to the PkgDelete builder. -func (pd *PkgDelete) Where(ps ...predicate.Pkg) *PkgDelete { - pd.mutation.Where(ps...) - return pd -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (pd *PkgDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (pd *PkgDelete) ExecX(ctx context.Context) int { - n, err := pd.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (pd *PkgDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(pkg.Table, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - if ps := pd.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - pd.mutation.done = true - return affected, err -} - -// PkgDeleteOne is the builder for deleting a single Pkg entity. -type PkgDeleteOne struct { - pd *PkgDelete -} - -// Where appends a list predicates to the PkgDelete builder. -func (pdo *PkgDeleteOne) Where(ps ...predicate.Pkg) *PkgDeleteOne { - pdo.pd.mutation.Where(ps...) - return pdo -} - -// Exec executes the deletion query. -func (pdo *PkgDeleteOne) Exec(ctx context.Context) error { - n, err := pdo.pd.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{pkg.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (pdo *PkgDeleteOne) ExecX(ctx context.Context) { - if err := pdo.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/internal/persistence/ent/db/pkg_query.go b/internal/persistence/ent/db/pkg_query.go deleted file mode 100644 index 90c9c1d97..000000000 --- a/internal/persistence/ent/db/pkg_query.go +++ /dev/null @@ -1,608 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "database/sql/driver" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" -) - -// PkgQuery is the builder for querying Pkg entities. -type PkgQuery struct { - config - ctx *QueryContext - order []pkg.OrderOption - inters []Interceptor - predicates []predicate.Pkg - withPreservationActions *PreservationActionQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the PkgQuery builder. -func (pq *PkgQuery) Where(ps ...predicate.Pkg) *PkgQuery { - pq.predicates = append(pq.predicates, ps...) - return pq -} - -// Limit the number of records to be returned by this query. -func (pq *PkgQuery) Limit(limit int) *PkgQuery { - pq.ctx.Limit = &limit - return pq -} - -// Offset to start from. -func (pq *PkgQuery) Offset(offset int) *PkgQuery { - pq.ctx.Offset = &offset - return pq -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (pq *PkgQuery) Unique(unique bool) *PkgQuery { - pq.ctx.Unique = &unique - return pq -} - -// Order specifies how the records should be ordered. -func (pq *PkgQuery) Order(o ...pkg.OrderOption) *PkgQuery { - pq.order = append(pq.order, o...) - return pq -} - -// QueryPreservationActions chains the current query on the "preservation_actions" edge. 
-func (pq *PkgQuery) QueryPreservationActions() *PreservationActionQuery { - query := (&PreservationActionClient{config: pq.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { - return nil, err - } - selector := pq.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(pkg.Table, pkg.FieldID, selector), - sqlgraph.To(preservationaction.Table, preservationaction.FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, pkg.PreservationActionsTable, pkg.PreservationActionsColumn), - ) - fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) - return fromU, nil - } - return query -} - -// First returns the first Pkg entity from the query. -// Returns a *NotFoundError when no Pkg was found. -func (pq *PkgQuery) First(ctx context.Context) (*Pkg, error) { - nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{pkg.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (pq *PkgQuery) FirstX(ctx context.Context) *Pkg { - node, err := pq.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first Pkg ID from the query. -// Returns a *NotFoundError when no Pkg ID was found. -func (pq *PkgQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{pkg.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. -func (pq *PkgQuery) FirstIDX(ctx context.Context) int { - id, err := pq.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single Pkg entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one Pkg entity is found. -// Returns a *NotFoundError when no Pkg entities are found. -func (pq *PkgQuery) Only(ctx context.Context) (*Pkg, error) { - nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{pkg.Label} - default: - return nil, &NotSingularError{pkg.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. -func (pq *PkgQuery) OnlyX(ctx context.Context) *Pkg { - node, err := pq.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only Pkg ID in the query. -// Returns a *NotSingularError when more than one Pkg ID is found. -// Returns a *NotFoundError when no entities are found. -func (pq *PkgQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{pkg.Label} - default: - err = &NotSingularError{pkg.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (pq *PkgQuery) OnlyIDX(ctx context.Context) int { - id, err := pq.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of Pkgs. 
-func (pq *PkgQuery) All(ctx context.Context) ([]*Pkg, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryAll) - if err := pq.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*Pkg, *PkgQuery]() - return withInterceptors[[]*Pkg](ctx, pq, qr, pq.inters) -} - -// AllX is like All, but panics if an error occurs. -func (pq *PkgQuery) AllX(ctx context.Context) []*Pkg { - nodes, err := pq.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of Pkg IDs. -func (pq *PkgQuery) IDs(ctx context.Context) (ids []int, err error) { - if pq.ctx.Unique == nil && pq.path != nil { - pq.Unique(true) - } - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryIDs) - if err = pq.Select(pkg.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (pq *PkgQuery) IDsX(ctx context.Context) []int { - ids, err := pq.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (pq *PkgQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryCount) - if err := pq.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, pq, querierCount[*PkgQuery](), pq.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (pq *PkgQuery) CountX(ctx context.Context) int { - count, err := pq.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. -func (pq *PkgQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryExist) - switch _, err := pq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("db: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (pq *PkgQuery) ExistX(ctx context.Context) bool { - exist, err := pq.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the PkgQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. -func (pq *PkgQuery) Clone() *PkgQuery { - if pq == nil { - return nil - } - return &PkgQuery{ - config: pq.config, - ctx: pq.ctx.Clone(), - order: append([]pkg.OrderOption{}, pq.order...), - inters: append([]Interceptor{}, pq.inters...), - predicates: append([]predicate.Pkg{}, pq.predicates...), - withPreservationActions: pq.withPreservationActions.Clone(), - // clone intermediate query. - sql: pq.sql.Clone(), - path: pq.path, - } -} - -// WithPreservationActions tells the query-builder to eager-load the nodes that are connected to -// the "preservation_actions" edge. The optional arguments are used to configure the query builder of the edge. -func (pq *PkgQuery) WithPreservationActions(opts ...func(*PreservationActionQuery)) *PkgQuery { - query := (&PreservationActionClient{config: pq.config}).Query() - for _, opt := range opts { - opt(query) - } - pq.withPreservationActions = query - return pq -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// Name string `json:"name,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.Pkg.Query(). -// GroupBy(pkg.FieldName). 
-// Aggregate(db.Count()). -// Scan(ctx, &v) -func (pq *PkgQuery) GroupBy(field string, fields ...string) *PkgGroupBy { - pq.ctx.Fields = append([]string{field}, fields...) - grbuild := &PkgGroupBy{build: pq} - grbuild.flds = &pq.ctx.Fields - grbuild.label = pkg.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// Name string `json:"name,omitempty"` -// } -// -// client.Pkg.Query(). -// Select(pkg.FieldName). -// Scan(ctx, &v) -func (pq *PkgQuery) Select(fields ...string) *PkgSelect { - pq.ctx.Fields = append(pq.ctx.Fields, fields...) - sbuild := &PkgSelect{PkgQuery: pq} - sbuild.label = pkg.Label - sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a PkgSelect configured with the given aggregations. -func (pq *PkgQuery) Aggregate(fns ...AggregateFunc) *PkgSelect { - return pq.Select().Aggregate(fns...) -} - -func (pq *PkgQuery) prepareQuery(ctx context.Context) error { - for _, inter := range pq.inters { - if inter == nil { - return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, pq); err != nil { - return err - } - } - } - for _, f := range pq.ctx.Fields { - if !pkg.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} - } - } - if pq.path != nil { - prev, err := pq.path(ctx) - if err != nil { - return err - } - pq.sql = prev - } - return nil -} - -func (pq *PkgQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Pkg, error) { - var ( - nodes = []*Pkg{} - _spec = pq.querySpec() - loadedTypes = [1]bool{ - pq.withPreservationActions != nil, - } - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*Pkg).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &Pkg{config: pq.config} - nodes = append(nodes, node) - node.Edges.loadedTypes = loadedTypes - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - if query := pq.withPreservationActions; query != nil { - if err := pq.loadPreservationActions(ctx, query, nodes, - func(n *Pkg) { n.Edges.PreservationActions = []*PreservationAction{} }, - func(n *Pkg, e *PreservationAction) { - n.Edges.PreservationActions = append(n.Edges.PreservationActions, e) - }); err != nil { - return nil, err - } - } - return nodes, nil -} - -func (pq *PkgQuery) loadPreservationActions(ctx context.Context, query *PreservationActionQuery, nodes []*Pkg, init func(*Pkg), assign func(*Pkg, *PreservationAction)) error { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[int]*Pkg) - for i := range nodes { - fks = append(fks, nodes[i].ID) - nodeids[nodes[i].ID] = nodes[i] - if init != nil { - init(nodes[i]) - } - } - if len(query.ctx.Fields) > 0 { - query.ctx.AppendFieldOnce(preservationaction.FieldPackageID) - } - query.Where(predicate.PreservationAction(func(s *sql.Selector) { - s.Where(sql.InValues(s.C(pkg.PreservationActionsColumn), fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - fk := n.PackageID - node, ok := nodeids[fk] - if !ok { - return 
fmt.Errorf(`unexpected referenced foreign-key "package_id" returned %v for node %v`, fk, n.ID) - } - assign(node, n) - } - return nil -} - -func (pq *PkgQuery) sqlCount(ctx context.Context) (int, error) { - _spec := pq.querySpec() - _spec.Node.Columns = pq.ctx.Fields - if len(pq.ctx.Fields) > 0 { - _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique - } - return sqlgraph.CountNodes(ctx, pq.driver, _spec) -} - -func (pq *PkgQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - _spec.From = pq.sql - if unique := pq.ctx.Unique; unique != nil { - _spec.Unique = *unique - } else if pq.path != nil { - _spec.Unique = true - } - if fields := pq.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, pkg.FieldID) - for i := range fields { - if fields[i] != pkg.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - } - if ps := pq.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := pq.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := pq.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := pq.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (pq *PkgQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(pq.driver.Dialect()) - t1 := builder.Table(pkg.Table) - columns := pq.ctx.Fields - if len(columns) == 0 { - columns = pkg.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if pq.sql != nil { - selector = pq.sql - selector.Select(selector.Columns(columns...)...) - } - if pq.ctx.Unique != nil && *pq.ctx.Unique { - selector.Distinct() - } - for _, p := range pq.predicates { - p(selector) - } - for _, p := range pq.order { - p(selector) - } - if offset := pq.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := pq.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// PkgGroupBy is the group-by builder for Pkg entities. -type PkgGroupBy struct { - selector - build *PkgQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (pgb *PkgGroupBy) Aggregate(fns ...AggregateFunc) *PkgGroupBy { - pgb.fns = append(pgb.fns, fns...) - return pgb -} - -// Scan applies the selector query and scans the result into the given value. -func (pgb *PkgGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, pgb.build.ctx, ent.OpQueryGroupBy) - if err := pgb.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*PkgQuery, *PkgGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) -} - -func (pgb *PkgGroupBy) sqlScan(ctx context.Context, root *PkgQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(pgb.fns)) - for _, fn := range pgb.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) - for _, f := range *pgb.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) 
- } - selector.GroupBy(selector.Columns(*pgb.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// PkgSelect is the builder for selecting fields of Pkg entities. -type PkgSelect struct { - *PkgQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (ps *PkgSelect) Aggregate(fns ...AggregateFunc) *PkgSelect { - ps.fns = append(ps.fns, fns...) - return ps -} - -// Scan applies the selector query and scans the result into the given value. -func (ps *PkgSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ps.ctx, ent.OpQuerySelect) - if err := ps.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*PkgQuery, *PkgSelect](ctx, ps.PkgQuery, ps, ps.inters, v) -} - -func (ps *PkgSelect) sqlScan(ctx context.Context, root *PkgQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ps.fns)) - for _, fn := range ps.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*ps.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := ps.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/internal/persistence/ent/db/pkg_update.go b/internal/persistence/ent/db/pkg_update.go deleted file mode 100644 index c23ecc7b0..000000000 --- a/internal/persistence/ent/db/pkg_update.go +++ /dev/null @@ -1,704 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" - "github.com/google/uuid" -) - -// PkgUpdate is the builder for updating Pkg entities. -type PkgUpdate struct { - config - hooks []Hook - mutation *PkgMutation -} - -// Where appends a list predicates to the PkgUpdate builder. -func (pu *PkgUpdate) Where(ps ...predicate.Pkg) *PkgUpdate { - pu.mutation.Where(ps...) - return pu -} - -// SetName sets the "name" field. -func (pu *PkgUpdate) SetName(s string) *PkgUpdate { - pu.mutation.SetName(s) - return pu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableName(s *string) *PkgUpdate { - if s != nil { - pu.SetName(*s) - } - return pu -} - -// SetWorkflowID sets the "workflow_id" field. -func (pu *PkgUpdate) SetWorkflowID(s string) *PkgUpdate { - pu.mutation.SetWorkflowID(s) - return pu -} - -// SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableWorkflowID(s *string) *PkgUpdate { - if s != nil { - pu.SetWorkflowID(*s) - } - return pu -} - -// SetRunID sets the "run_id" field. -func (pu *PkgUpdate) SetRunID(u uuid.UUID) *PkgUpdate { - pu.mutation.SetRunID(u) - return pu -} - -// SetNillableRunID sets the "run_id" field if the given value is not nil. 
-func (pu *PkgUpdate) SetNillableRunID(u *uuid.UUID) *PkgUpdate { - if u != nil { - pu.SetRunID(*u) - } - return pu -} - -// SetAipID sets the "aip_id" field. -func (pu *PkgUpdate) SetAipID(u uuid.UUID) *PkgUpdate { - pu.mutation.SetAipID(u) - return pu -} - -// SetNillableAipID sets the "aip_id" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableAipID(u *uuid.UUID) *PkgUpdate { - if u != nil { - pu.SetAipID(*u) - } - return pu -} - -// ClearAipID clears the value of the "aip_id" field. -func (pu *PkgUpdate) ClearAipID() *PkgUpdate { - pu.mutation.ClearAipID() - return pu -} - -// SetLocationID sets the "location_id" field. -func (pu *PkgUpdate) SetLocationID(u uuid.UUID) *PkgUpdate { - pu.mutation.SetLocationID(u) - return pu -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableLocationID(u *uuid.UUID) *PkgUpdate { - if u != nil { - pu.SetLocationID(*u) - } - return pu -} - -// ClearLocationID clears the value of the "location_id" field. -func (pu *PkgUpdate) ClearLocationID() *PkgUpdate { - pu.mutation.ClearLocationID() - return pu -} - -// SetStatus sets the "status" field. -func (pu *PkgUpdate) SetStatus(i int8) *PkgUpdate { - pu.mutation.ResetStatus() - pu.mutation.SetStatus(i) - return pu -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableStatus(i *int8) *PkgUpdate { - if i != nil { - pu.SetStatus(*i) - } - return pu -} - -// AddStatus adds i to the "status" field. -func (pu *PkgUpdate) AddStatus(i int8) *PkgUpdate { - pu.mutation.AddStatus(i) - return pu -} - -// SetStartedAt sets the "started_at" field. -func (pu *PkgUpdate) SetStartedAt(t time.Time) *PkgUpdate { - pu.mutation.SetStartedAt(t) - return pu -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableStartedAt(t *time.Time) *PkgUpdate { - if t != nil { - pu.SetStartedAt(*t) - } - return pu -} - -// ClearStartedAt clears the value of the "started_at" field. -func (pu *PkgUpdate) ClearStartedAt() *PkgUpdate { - pu.mutation.ClearStartedAt() - return pu -} - -// SetCompletedAt sets the "completed_at" field. -func (pu *PkgUpdate) SetCompletedAt(t time.Time) *PkgUpdate { - pu.mutation.SetCompletedAt(t) - return pu -} - -// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableCompletedAt(t *time.Time) *PkgUpdate { - if t != nil { - pu.SetCompletedAt(*t) - } - return pu -} - -// ClearCompletedAt clears the value of the "completed_at" field. -func (pu *PkgUpdate) ClearCompletedAt() *PkgUpdate { - pu.mutation.ClearCompletedAt() - return pu -} - -// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. -func (pu *PkgUpdate) AddPreservationActionIDs(ids ...int) *PkgUpdate { - pu.mutation.AddPreservationActionIDs(ids...) - return pu -} - -// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. -func (pu *PkgUpdate) AddPreservationActions(p ...*PreservationAction) *PkgUpdate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID - } - return pu.AddPreservationActionIDs(ids...) -} - -// Mutation returns the PkgMutation object of the builder. -func (pu *PkgUpdate) Mutation() *PkgMutation { - return pu.mutation -} - -// ClearPreservationActions clears all "preservation_actions" edges to the PreservationAction entity. 
-func (pu *PkgUpdate) ClearPreservationActions() *PkgUpdate { - pu.mutation.ClearPreservationActions() - return pu -} - -// RemovePreservationActionIDs removes the "preservation_actions" edge to PreservationAction entities by IDs. -func (pu *PkgUpdate) RemovePreservationActionIDs(ids ...int) *PkgUpdate { - pu.mutation.RemovePreservationActionIDs(ids...) - return pu -} - -// RemovePreservationActions removes "preservation_actions" edges to PreservationAction entities. -func (pu *PkgUpdate) RemovePreservationActions(p ...*PreservationAction) *PkgUpdate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID - } - return pu.RemovePreservationActionIDs(ids...) -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (pu *PkgUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (pu *PkgUpdate) SaveX(ctx context.Context) int { - affected, err := pu.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. -func (pu *PkgUpdate) Exec(ctx context.Context) error { - _, err := pu.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pu *PkgUpdate) ExecX(ctx context.Context) { - if err := pu.Exec(ctx); err != nil { - panic(err) - } -} - -func (pu *PkgUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := sqlgraph.NewUpdateSpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - if ps := pu.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := pu.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - } - if value, ok := pu.mutation.WorkflowID(); ok { - _spec.SetField(pkg.FieldWorkflowID, field.TypeString, value) - } - if value, ok := pu.mutation.RunID(); ok { - _spec.SetField(pkg.FieldRunID, field.TypeUUID, value) - } - if value, ok := pu.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - } - if pu.mutation.AipIDCleared() { - _spec.ClearField(pkg.FieldAipID, field.TypeUUID) - } - if value, ok := pu.mutation.LocationID(); ok { - _spec.SetField(pkg.FieldLocationID, field.TypeUUID, value) - } - if pu.mutation.LocationIDCleared() { - _spec.ClearField(pkg.FieldLocationID, field.TypeUUID) - } - if value, ok := pu.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeInt8, value) - } - if value, ok := pu.mutation.AddedStatus(); ok { - _spec.AddField(pkg.FieldStatus, field.TypeInt8, value) - } - if value, ok := pu.mutation.StartedAt(); ok { - _spec.SetField(pkg.FieldStartedAt, field.TypeTime, value) - } - if pu.mutation.StartedAtCleared() { - _spec.ClearField(pkg.FieldStartedAt, field.TypeTime) - } - if value, ok := pu.mutation.CompletedAt(); ok { - _spec.SetField(pkg.FieldCompletedAt, field.TypeTime, value) - } - if pu.mutation.CompletedAtCleared() { - _spec.ClearField(pkg.FieldCompletedAt, field.TypeTime) - } - if pu.mutation.PreservationActionsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := 
pu.mutation.RemovedPreservationActionsIDs(); len(nodes) > 0 && !pu.mutation.PreservationActionsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := pu.mutation.PreservationActionsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{pkg.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - pu.mutation.done = true - return n, nil -} - -// PkgUpdateOne is the builder for updating a single Pkg entity. -type PkgUpdateOne struct { - config - fields []string - hooks []Hook - mutation *PkgMutation -} - -// SetName sets the "name" field. -func (puo *PkgUpdateOne) SetName(s string) *PkgUpdateOne { - puo.mutation.SetName(s) - return puo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableName(s *string) *PkgUpdateOne { - if s != nil { - puo.SetName(*s) - } - return puo -} - -// SetWorkflowID sets the "workflow_id" field. -func (puo *PkgUpdateOne) SetWorkflowID(s string) *PkgUpdateOne { - puo.mutation.SetWorkflowID(s) - return puo -} - -// SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableWorkflowID(s *string) *PkgUpdateOne { - if s != nil { - puo.SetWorkflowID(*s) - } - return puo -} - -// SetRunID sets the "run_id" field. -func (puo *PkgUpdateOne) SetRunID(u uuid.UUID) *PkgUpdateOne { - puo.mutation.SetRunID(u) - return puo -} - -// SetNillableRunID sets the "run_id" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableRunID(u *uuid.UUID) *PkgUpdateOne { - if u != nil { - puo.SetRunID(*u) - } - return puo -} - -// SetAipID sets the "aip_id" field. -func (puo *PkgUpdateOne) SetAipID(u uuid.UUID) *PkgUpdateOne { - puo.mutation.SetAipID(u) - return puo -} - -// SetNillableAipID sets the "aip_id" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableAipID(u *uuid.UUID) *PkgUpdateOne { - if u != nil { - puo.SetAipID(*u) - } - return puo -} - -// ClearAipID clears the value of the "aip_id" field. -func (puo *PkgUpdateOne) ClearAipID() *PkgUpdateOne { - puo.mutation.ClearAipID() - return puo -} - -// SetLocationID sets the "location_id" field. -func (puo *PkgUpdateOne) SetLocationID(u uuid.UUID) *PkgUpdateOne { - puo.mutation.SetLocationID(u) - return puo -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. 
-func (puo *PkgUpdateOne) SetNillableLocationID(u *uuid.UUID) *PkgUpdateOne { - if u != nil { - puo.SetLocationID(*u) - } - return puo -} - -// ClearLocationID clears the value of the "location_id" field. -func (puo *PkgUpdateOne) ClearLocationID() *PkgUpdateOne { - puo.mutation.ClearLocationID() - return puo -} - -// SetStatus sets the "status" field. -func (puo *PkgUpdateOne) SetStatus(i int8) *PkgUpdateOne { - puo.mutation.ResetStatus() - puo.mutation.SetStatus(i) - return puo -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableStatus(i *int8) *PkgUpdateOne { - if i != nil { - puo.SetStatus(*i) - } - return puo -} - -// AddStatus adds i to the "status" field. -func (puo *PkgUpdateOne) AddStatus(i int8) *PkgUpdateOne { - puo.mutation.AddStatus(i) - return puo -} - -// SetStartedAt sets the "started_at" field. -func (puo *PkgUpdateOne) SetStartedAt(t time.Time) *PkgUpdateOne { - puo.mutation.SetStartedAt(t) - return puo -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableStartedAt(t *time.Time) *PkgUpdateOne { - if t != nil { - puo.SetStartedAt(*t) - } - return puo -} - -// ClearStartedAt clears the value of the "started_at" field. -func (puo *PkgUpdateOne) ClearStartedAt() *PkgUpdateOne { - puo.mutation.ClearStartedAt() - return puo -} - -// SetCompletedAt sets the "completed_at" field. -func (puo *PkgUpdateOne) SetCompletedAt(t time.Time) *PkgUpdateOne { - puo.mutation.SetCompletedAt(t) - return puo -} - -// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableCompletedAt(t *time.Time) *PkgUpdateOne { - if t != nil { - puo.SetCompletedAt(*t) - } - return puo -} - -// ClearCompletedAt clears the value of the "completed_at" field. -func (puo *PkgUpdateOne) ClearCompletedAt() *PkgUpdateOne { - puo.mutation.ClearCompletedAt() - return puo -} - -// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. -func (puo *PkgUpdateOne) AddPreservationActionIDs(ids ...int) *PkgUpdateOne { - puo.mutation.AddPreservationActionIDs(ids...) - return puo -} - -// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. -func (puo *PkgUpdateOne) AddPreservationActions(p ...*PreservationAction) *PkgUpdateOne { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID - } - return puo.AddPreservationActionIDs(ids...) -} - -// Mutation returns the PkgMutation object of the builder. -func (puo *PkgUpdateOne) Mutation() *PkgMutation { - return puo.mutation -} - -// ClearPreservationActions clears all "preservation_actions" edges to the PreservationAction entity. -func (puo *PkgUpdateOne) ClearPreservationActions() *PkgUpdateOne { - puo.mutation.ClearPreservationActions() - return puo -} - -// RemovePreservationActionIDs removes the "preservation_actions" edge to PreservationAction entities by IDs. -func (puo *PkgUpdateOne) RemovePreservationActionIDs(ids ...int) *PkgUpdateOne { - puo.mutation.RemovePreservationActionIDs(ids...) - return puo -} - -// RemovePreservationActions removes "preservation_actions" edges to PreservationAction entities. -func (puo *PkgUpdateOne) RemovePreservationActions(p ...*PreservationAction) *PkgUpdateOne { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID - } - return puo.RemovePreservationActionIDs(ids...) 
-} - -// Where appends a list predicates to the PkgUpdate builder. -func (puo *PkgUpdateOne) Where(ps ...predicate.Pkg) *PkgUpdateOne { - puo.mutation.Where(ps...) - return puo -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (puo *PkgUpdateOne) Select(field string, fields ...string) *PkgUpdateOne { - puo.fields = append([]string{field}, fields...) - return puo -} - -// Save executes the query and returns the updated Pkg entity. -func (puo *PkgUpdateOne) Save(ctx context.Context) (*Pkg, error) { - return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (puo *PkgUpdateOne) SaveX(ctx context.Context) *Pkg { - node, err := puo.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (puo *PkgUpdateOne) Exec(ctx context.Context) error { - _, err := puo.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (puo *PkgUpdateOne) ExecX(ctx context.Context) { - if err := puo.Exec(ctx); err != nil { - panic(err) - } -} - -func (puo *PkgUpdateOne) sqlSave(ctx context.Context) (_node *Pkg, err error) { - _spec := sqlgraph.NewUpdateSpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - id, ok := puo.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "Pkg.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := puo.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, pkg.FieldID) - for _, f := range fields { - if !pkg.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} - } - if f != pkg.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := puo.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := puo.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - } - if value, ok := puo.mutation.WorkflowID(); ok { - _spec.SetField(pkg.FieldWorkflowID, field.TypeString, value) - } - if value, ok := puo.mutation.RunID(); ok { - _spec.SetField(pkg.FieldRunID, field.TypeUUID, value) - } - if value, ok := puo.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - } - if puo.mutation.AipIDCleared() { - _spec.ClearField(pkg.FieldAipID, field.TypeUUID) - } - if value, ok := puo.mutation.LocationID(); ok { - _spec.SetField(pkg.FieldLocationID, field.TypeUUID, value) - } - if puo.mutation.LocationIDCleared() { - _spec.ClearField(pkg.FieldLocationID, field.TypeUUID) - } - if value, ok := puo.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeInt8, value) - } - if value, ok := puo.mutation.AddedStatus(); ok { - _spec.AddField(pkg.FieldStatus, field.TypeInt8, value) - } - if value, ok := puo.mutation.StartedAt(); ok { - _spec.SetField(pkg.FieldStartedAt, field.TypeTime, value) - } - if puo.mutation.StartedAtCleared() { - _spec.ClearField(pkg.FieldStartedAt, field.TypeTime) - } - if value, ok := puo.mutation.CompletedAt(); ok { - _spec.SetField(pkg.FieldCompletedAt, field.TypeTime, value) - } - if puo.mutation.CompletedAtCleared() { - _spec.ClearField(pkg.FieldCompletedAt, field.TypeTime) - } - if puo.mutation.PreservationActionsCleared() { - edge 
:= &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := puo.mutation.RemovedPreservationActionsIDs(); len(nodes) > 0 && !puo.mutation.PreservationActionsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := puo.mutation.PreservationActionsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: false, - Table: pkg.PreservationActionsTable, - Columns: []string{pkg.PreservationActionsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - _node = &Pkg{config: puo.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{pkg.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - puo.mutation.done = true - return _node, nil -} diff --git a/internal/persistence/ent/db/predicate/predicate.go b/internal/persistence/ent/db/predicate/predicate.go index 9dbac7b3f..66e6ef86c 100644 --- a/internal/persistence/ent/db/predicate/predicate.go +++ b/internal/persistence/ent/db/predicate/predicate.go @@ -6,11 +6,11 @@ import ( "entgo.io/ent/dialect/sql" ) -// Pkg is the predicate function for pkg builders. -type Pkg func(*sql.Selector) - // PreservationAction is the predicate function for preservationaction builders. type PreservationAction func(*sql.Selector) // PreservationTask is the predicate function for preservationtask builders. type PreservationTask func(*sql.Selector) + +// SIP is the predicate function for sip builders. +type SIP func(*sql.Selector) diff --git a/internal/persistence/ent/db/preservationaction.go b/internal/persistence/ent/db/preservationaction.go index 2a66d70e0..a446db121 100644 --- a/internal/persistence/ent/db/preservationaction.go +++ b/internal/persistence/ent/db/preservationaction.go @@ -9,8 +9,8 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // PreservationAction is the model entity for the PreservationAction schema. @@ -28,8 +28,8 @@ type PreservationAction struct { StartedAt time.Time `json:"started_at,omitempty"` // CompletedAt holds the value of the "completed_at" field. CompletedAt time.Time `json:"completed_at,omitempty"` - // PackageID holds the value of the "package_id" field. 
- PackageID int `json:"package_id,omitempty"` + // SipID holds the value of the "sip_id" field. + SipID int `json:"sip_id,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the PreservationActionQuery when eager-loading is set. Edges PreservationActionEdges `json:"edges"` @@ -38,8 +38,8 @@ type PreservationAction struct { // PreservationActionEdges holds the relations/edges for other nodes in the graph. type PreservationActionEdges struct { - // Package holds the value of the package edge. - Package *Pkg `json:"package,omitempty"` + // Sip holds the value of the sip edge. + Sip *SIP `json:"sip,omitempty"` // Tasks holds the value of the tasks edge. Tasks []*PreservationTask `json:"tasks,omitempty"` // loadedTypes holds the information for reporting if a @@ -47,15 +47,15 @@ type PreservationActionEdges struct { loadedTypes [2]bool } -// PackageOrErr returns the Package value or an error if the edge +// SipOrErr returns the Sip value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. -func (e PreservationActionEdges) PackageOrErr() (*Pkg, error) { - if e.Package != nil { - return e.Package, nil +func (e PreservationActionEdges) SipOrErr() (*SIP, error) { + if e.Sip != nil { + return e.Sip, nil } else if e.loadedTypes[0] { - return nil, &NotFoundError{label: pkg.Label} + return nil, &NotFoundError{label: sip.Label} } - return nil, &NotLoadedError{edge: "package"} + return nil, &NotLoadedError{edge: "sip"} } // TasksOrErr returns the Tasks value or an error if the edge @@ -72,7 +72,7 @@ func (*PreservationAction) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case preservationaction.FieldID, preservationaction.FieldType, preservationaction.FieldStatus, preservationaction.FieldPackageID: + case preservationaction.FieldID, preservationaction.FieldType, preservationaction.FieldStatus, preservationaction.FieldSipID: values[i] = new(sql.NullInt64) case preservationaction.FieldWorkflowID: values[i] = new(sql.NullString) @@ -129,11 +129,11 @@ func (pa *PreservationAction) assignValues(columns []string, values []any) error } else if value.Valid { pa.CompletedAt = value.Time } - case preservationaction.FieldPackageID: + case preservationaction.FieldSipID: if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field package_id", values[i]) + return fmt.Errorf("unexpected type %T for field sip_id", values[i]) } else if value.Valid { - pa.PackageID = int(value.Int64) + pa.SipID = int(value.Int64) } default: pa.selectValues.Set(columns[i], values[i]) @@ -148,9 +148,9 @@ func (pa *PreservationAction) Value(name string) (ent.Value, error) { return pa.selectValues.Get(name) } -// QueryPackage queries the "package" edge of the PreservationAction entity. -func (pa *PreservationAction) QueryPackage() *PkgQuery { - return NewPreservationActionClient(pa.config).QueryPackage(pa) +// QuerySip queries the "sip" edge of the PreservationAction entity. +func (pa *PreservationAction) QuerySip() *SIPQuery { + return NewPreservationActionClient(pa.config).QuerySip(pa) } // QueryTasks queries the "tasks" edge of the PreservationAction entity. 
@@ -196,8 +196,8 @@ func (pa *PreservationAction) String() string { builder.WriteString("completed_at=") builder.WriteString(pa.CompletedAt.Format(time.ANSIC)) builder.WriteString(", ") - builder.WriteString("package_id=") - builder.WriteString(fmt.Sprintf("%v", pa.PackageID)) + builder.WriteString("sip_id=") + builder.WriteString(fmt.Sprintf("%v", pa.SipID)) builder.WriteByte(')') return builder.String() } diff --git a/internal/persistence/ent/db/preservationaction/preservationaction.go b/internal/persistence/ent/db/preservationaction/preservationaction.go index c99a28b4f..a197235e8 100644 --- a/internal/persistence/ent/db/preservationaction/preservationaction.go +++ b/internal/persistence/ent/db/preservationaction/preservationaction.go @@ -22,21 +22,21 @@ const ( FieldStartedAt = "started_at" // FieldCompletedAt holds the string denoting the completed_at field in the database. FieldCompletedAt = "completed_at" - // FieldPackageID holds the string denoting the package_id field in the database. - FieldPackageID = "package_id" - // EdgePackage holds the string denoting the package edge name in mutations. - EdgePackage = "package" + // FieldSipID holds the string denoting the sip_id field in the database. + FieldSipID = "sip_id" + // EdgeSip holds the string denoting the sip edge name in mutations. + EdgeSip = "sip" // EdgeTasks holds the string denoting the tasks edge name in mutations. EdgeTasks = "tasks" // Table holds the table name of the preservationaction in the database. Table = "preservation_action" - // PackageTable is the table that holds the package relation/edge. - PackageTable = "preservation_action" - // PackageInverseTable is the table name for the Pkg entity. - // It exists in this package in order to avoid circular dependency with the "pkg" package. - PackageInverseTable = "package" - // PackageColumn is the table column denoting the package relation/edge. - PackageColumn = "package_id" + // SipTable is the table that holds the sip relation/edge. + SipTable = "preservation_action" + // SipInverseTable is the table name for the SIP entity. + // It exists in this package in order to avoid circular dependency with the "sip" package. + SipInverseTable = "sip" + // SipColumn is the table column denoting the sip relation/edge. + SipColumn = "sip_id" // TasksTable is the table that holds the tasks relation/edge. TasksTable = "preservation_task" // TasksInverseTable is the table name for the PreservationTask entity. @@ -54,7 +54,7 @@ var Columns = []string{ FieldStatus, FieldStartedAt, FieldCompletedAt, - FieldPackageID, + FieldSipID, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -68,8 +68,8 @@ func ValidColumn(column string) bool { } var ( - // PackageIDValidator is a validator for the "package_id" field. It is called by the builders before save. - PackageIDValidator func(int) error + // SipIDValidator is a validator for the "sip_id" field. It is called by the builders before save. + SipIDValidator func(int) error ) // OrderOption defines the ordering options for the PreservationAction queries. @@ -105,15 +105,15 @@ func ByCompletedAt(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldCompletedAt, opts...).ToFunc() } -// ByPackageID orders the results by the package_id field. -func ByPackageID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPackageID, opts...).ToFunc() +// BySipID orders the results by the sip_id field. 
+func BySipID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSipID, opts...).ToFunc() } -// ByPackageField orders the results by package field. -func ByPackageField(field string, opts ...sql.OrderTermOption) OrderOption { +// BySipField orders the results by sip field. +func BySipField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { - sqlgraph.OrderByNeighborTerms(s, newPackageStep(), sql.OrderByField(field, opts...)) + sqlgraph.OrderByNeighborTerms(s, newSipStep(), sql.OrderByField(field, opts...)) } } @@ -130,11 +130,11 @@ func ByTasks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { sqlgraph.OrderByNeighborTerms(s, newTasksStep(), append([]sql.OrderTerm{term}, terms...)...) } } -func newPackageStep() *sqlgraph.Step { +func newSipStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PackageInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, PackageTable, PackageColumn), + sqlgraph.To(SipInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, SipTable, SipColumn), ) } func newTasksStep() *sqlgraph.Step { diff --git a/internal/persistence/ent/db/preservationaction/where.go b/internal/persistence/ent/db/preservationaction/where.go index f9cff848b..cb632f0c6 100644 --- a/internal/persistence/ent/db/preservationaction/where.go +++ b/internal/persistence/ent/db/preservationaction/where.go @@ -80,9 +80,9 @@ func CompletedAt(v time.Time) predicate.PreservationAction { return predicate.PreservationAction(sql.FieldEQ(FieldCompletedAt, v)) } -// PackageID applies equality check predicate on the "package_id" field. It's identical to PackageIDEQ. -func PackageID(v int) predicate.PreservationAction { - return predicate.PreservationAction(sql.FieldEQ(FieldPackageID, v)) +// SipID applies equality check predicate on the "sip_id" field. It's identical to SipIDEQ. +func SipID(v int) predicate.PreservationAction { + return predicate.PreservationAction(sql.FieldEQ(FieldSipID, v)) } // WorkflowIDEQ applies the EQ predicate on the "workflow_id" field. @@ -330,41 +330,41 @@ func CompletedAtNotNil() predicate.PreservationAction { return predicate.PreservationAction(sql.FieldNotNull(FieldCompletedAt)) } -// PackageIDEQ applies the EQ predicate on the "package_id" field. -func PackageIDEQ(v int) predicate.PreservationAction { - return predicate.PreservationAction(sql.FieldEQ(FieldPackageID, v)) +// SipIDEQ applies the EQ predicate on the "sip_id" field. +func SipIDEQ(v int) predicate.PreservationAction { + return predicate.PreservationAction(sql.FieldEQ(FieldSipID, v)) } -// PackageIDNEQ applies the NEQ predicate on the "package_id" field. -func PackageIDNEQ(v int) predicate.PreservationAction { - return predicate.PreservationAction(sql.FieldNEQ(FieldPackageID, v)) +// SipIDNEQ applies the NEQ predicate on the "sip_id" field. +func SipIDNEQ(v int) predicate.PreservationAction { + return predicate.PreservationAction(sql.FieldNEQ(FieldSipID, v)) } -// PackageIDIn applies the In predicate on the "package_id" field. -func PackageIDIn(vs ...int) predicate.PreservationAction { - return predicate.PreservationAction(sql.FieldIn(FieldPackageID, vs...)) +// SipIDIn applies the In predicate on the "sip_id" field. +func SipIDIn(vs ...int) predicate.PreservationAction { + return predicate.PreservationAction(sql.FieldIn(FieldSipID, vs...)) } -// PackageIDNotIn applies the NotIn predicate on the "package_id" field. 
-func PackageIDNotIn(vs ...int) predicate.PreservationAction { - return predicate.PreservationAction(sql.FieldNotIn(FieldPackageID, vs...)) +// SipIDNotIn applies the NotIn predicate on the "sip_id" field. +func SipIDNotIn(vs ...int) predicate.PreservationAction { + return predicate.PreservationAction(sql.FieldNotIn(FieldSipID, vs...)) } -// HasPackage applies the HasEdge predicate on the "package" edge. -func HasPackage() predicate.PreservationAction { +// HasSip applies the HasEdge predicate on the "sip" edge. +func HasSip() predicate.PreservationAction { return predicate.PreservationAction(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, PackageTable, PackageColumn), + sqlgraph.Edge(sqlgraph.M2O, true, SipTable, SipColumn), ) sqlgraph.HasNeighbors(s, step) }) } -// HasPackageWith applies the HasEdge predicate on the "package" edge with a given conditions (other predicates). -func HasPackageWith(preds ...predicate.Pkg) predicate.PreservationAction { +// HasSipWith applies the HasEdge predicate on the "sip" edge with a given conditions (other predicates). +func HasSipWith(preds ...predicate.SIP) predicate.PreservationAction { return predicate.PreservationAction(func(s *sql.Selector) { - step := newPackageStep() + step := newSipStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) diff --git a/internal/persistence/ent/db/preservationaction_create.go b/internal/persistence/ent/db/preservationaction_create.go index 7f9291255..26c57a478 100644 --- a/internal/persistence/ent/db/preservationaction_create.go +++ b/internal/persistence/ent/db/preservationaction_create.go @@ -10,9 +10,9 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // PreservationActionCreate is the builder for creating a PreservationAction entity. @@ -68,15 +68,15 @@ func (pac *PreservationActionCreate) SetNillableCompletedAt(t *time.Time) *Prese return pac } -// SetPackageID sets the "package_id" field. -func (pac *PreservationActionCreate) SetPackageID(i int) *PreservationActionCreate { - pac.mutation.SetPackageID(i) +// SetSipID sets the "sip_id" field. +func (pac *PreservationActionCreate) SetSipID(i int) *PreservationActionCreate { + pac.mutation.SetSipID(i) return pac } -// SetPackage sets the "package" edge to the Pkg entity. -func (pac *PreservationActionCreate) SetPackage(p *Pkg) *PreservationActionCreate { - return pac.SetPackageID(p.ID) +// SetSip sets the "sip" edge to the SIP entity. +func (pac *PreservationActionCreate) SetSip(s *SIP) *PreservationActionCreate { + return pac.SetSipID(s.ID) } // AddTaskIDs adds the "tasks" edge to the PreservationTask entity by IDs. 
@@ -137,16 +137,16 @@ func (pac *PreservationActionCreate) check() error { if _, ok := pac.mutation.Status(); !ok { return &ValidationError{Name: "status", err: errors.New(`db: missing required field "PreservationAction.status"`)} } - if _, ok := pac.mutation.PackageID(); !ok { - return &ValidationError{Name: "package_id", err: errors.New(`db: missing required field "PreservationAction.package_id"`)} + if _, ok := pac.mutation.SipID(); !ok { + return &ValidationError{Name: "sip_id", err: errors.New(`db: missing required field "PreservationAction.sip_id"`)} } - if v, ok := pac.mutation.PackageID(); ok { - if err := preservationaction.PackageIDValidator(v); err != nil { - return &ValidationError{Name: "package_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.package_id": %w`, err)} + if v, ok := pac.mutation.SipID(); ok { + if err := preservationaction.SipIDValidator(v); err != nil { + return &ValidationError{Name: "sip_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.sip_id": %w`, err)} } } - if len(pac.mutation.PackageIDs()) == 0 { - return &ValidationError{Name: "package", err: errors.New(`db: missing required edge "PreservationAction.package"`)} + if len(pac.mutation.SipIDs()) == 0 { + return &ValidationError{Name: "sip", err: errors.New(`db: missing required edge "PreservationAction.sip"`)} } return nil } @@ -194,21 +194,21 @@ func (pac *PreservationActionCreate) createSpec() (*PreservationAction, *sqlgrap _spec.SetField(preservationaction.FieldCompletedAt, field.TypeTime, value) _node.CompletedAt = value } - if nodes := pac.mutation.PackageIDs(); len(nodes) > 0 { + if nodes := pac.mutation.SipIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, - Table: preservationaction.PackageTable, - Columns: []string{preservationaction.PackageColumn}, + Table: preservationaction.SipTable, + Columns: []string{preservationaction.SipColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt), }, } for _, k := range nodes { edge.Target.Nodes = append(edge.Target.Nodes, k) } - _node.PackageID = nodes[0] + _node.SipID = nodes[0] _spec.Edges = append(_spec.Edges, edge) } if nodes := pac.mutation.TasksIDs(); len(nodes) > 0 { diff --git a/internal/persistence/ent/db/preservationaction_query.go b/internal/persistence/ent/db/preservationaction_query.go index a4a977e1c..7de21d182 100644 --- a/internal/persistence/ent/db/preservationaction_query.go +++ b/internal/persistence/ent/db/preservationaction_query.go @@ -12,21 +12,21 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // PreservationActionQuery is the builder for querying PreservationAction entities. 
type PreservationActionQuery struct { config - ctx *QueryContext - order []preservationaction.OrderOption - inters []Interceptor - predicates []predicate.PreservationAction - withPackage *PkgQuery - withTasks *PreservationTaskQuery + ctx *QueryContext + order []preservationaction.OrderOption + inters []Interceptor + predicates []predicate.PreservationAction + withSip *SIPQuery + withTasks *PreservationTaskQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -63,9 +63,9 @@ func (paq *PreservationActionQuery) Order(o ...preservationaction.OrderOption) * return paq } -// QueryPackage chains the current query on the "package" edge. -func (paq *PreservationActionQuery) QueryPackage() *PkgQuery { - query := (&PkgClient{config: paq.config}).Query() +// QuerySip chains the current query on the "sip" edge. +func (paq *PreservationActionQuery) QuerySip() *SIPQuery { + query := (&SIPClient{config: paq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := paq.prepareQuery(ctx); err != nil { return nil, err @@ -76,8 +76,8 @@ func (paq *PreservationActionQuery) QueryPackage() *PkgQuery { } step := sqlgraph.NewStep( sqlgraph.From(preservationaction.Table, preservationaction.FieldID, selector), - sqlgraph.To(pkg.Table, pkg.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, preservationaction.PackageTable, preservationaction.PackageColumn), + sqlgraph.To(sip.Table, sip.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, preservationaction.SipTable, preservationaction.SipColumn), ) fromU = sqlgraph.SetNeighbors(paq.driver.Dialect(), step) return fromU, nil @@ -294,27 +294,27 @@ func (paq *PreservationActionQuery) Clone() *PreservationActionQuery { return nil } return &PreservationActionQuery{ - config: paq.config, - ctx: paq.ctx.Clone(), - order: append([]preservationaction.OrderOption{}, paq.order...), - inters: append([]Interceptor{}, paq.inters...), - predicates: append([]predicate.PreservationAction{}, paq.predicates...), - withPackage: paq.withPackage.Clone(), - withTasks: paq.withTasks.Clone(), + config: paq.config, + ctx: paq.ctx.Clone(), + order: append([]preservationaction.OrderOption{}, paq.order...), + inters: append([]Interceptor{}, paq.inters...), + predicates: append([]predicate.PreservationAction{}, paq.predicates...), + withSip: paq.withSip.Clone(), + withTasks: paq.withTasks.Clone(), // clone intermediate query. sql: paq.sql.Clone(), path: paq.path, } } -// WithPackage tells the query-builder to eager-load the nodes that are connected to -// the "package" edge. The optional arguments are used to configure the query builder of the edge. -func (paq *PreservationActionQuery) WithPackage(opts ...func(*PkgQuery)) *PreservationActionQuery { - query := (&PkgClient{config: paq.config}).Query() +// WithSip tells the query-builder to eager-load the nodes that are connected to +// the "sip" edge. The optional arguments are used to configure the query builder of the edge. 
+func (paq *PreservationActionQuery) WithSip(opts ...func(*SIPQuery)) *PreservationActionQuery { + query := (&SIPClient{config: paq.config}).Query() for _, opt := range opts { opt(query) } - paq.withPackage = query + paq.withSip = query return paq } @@ -408,7 +408,7 @@ func (paq *PreservationActionQuery) sqlAll(ctx context.Context, hooks ...queryHo nodes = []*PreservationAction{} _spec = paq.querySpec() loadedTypes = [2]bool{ - paq.withPackage != nil, + paq.withSip != nil, paq.withTasks != nil, } ) @@ -430,9 +430,9 @@ func (paq *PreservationActionQuery) sqlAll(ctx context.Context, hooks ...queryHo if len(nodes) == 0 { return nodes, nil } - if query := paq.withPackage; query != nil { - if err := paq.loadPackage(ctx, query, nodes, nil, - func(n *PreservationAction, e *Pkg) { n.Edges.Package = e }); err != nil { + if query := paq.withSip; query != nil { + if err := paq.loadSip(ctx, query, nodes, nil, + func(n *PreservationAction, e *SIP) { n.Edges.Sip = e }); err != nil { return nil, err } } @@ -446,11 +446,11 @@ func (paq *PreservationActionQuery) sqlAll(ctx context.Context, hooks ...queryHo return nodes, nil } -func (paq *PreservationActionQuery) loadPackage(ctx context.Context, query *PkgQuery, nodes []*PreservationAction, init func(*PreservationAction), assign func(*PreservationAction, *Pkg)) error { +func (paq *PreservationActionQuery) loadSip(ctx context.Context, query *SIPQuery, nodes []*PreservationAction, init func(*PreservationAction), assign func(*PreservationAction, *SIP)) error { ids := make([]int, 0, len(nodes)) nodeids := make(map[int][]*PreservationAction) for i := range nodes { - fk := nodes[i].PackageID + fk := nodes[i].SipID if _, ok := nodeids[fk]; !ok { ids = append(ids, fk) } @@ -459,7 +459,7 @@ func (paq *PreservationActionQuery) loadPackage(ctx context.Context, query *PkgQ if len(ids) == 0 { return nil } - query.Where(pkg.IDIn(ids...)) + query.Where(sip.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { return err @@ -467,7 +467,7 @@ func (paq *PreservationActionQuery) loadPackage(ctx context.Context, query *PkgQ for _, n := range neighbors { nodes, ok := nodeids[n.ID] if !ok { - return fmt.Errorf(`unexpected foreign-key "package_id" returned %v`, n.ID) + return fmt.Errorf(`unexpected foreign-key "sip_id" returned %v`, n.ID) } for i := range nodes { assign(nodes[i], n) @@ -531,8 +531,8 @@ func (paq *PreservationActionQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } - if paq.withPackage != nil { - _spec.Node.AddColumnOnce(preservationaction.FieldPackageID) + if paq.withSip != nil { + _spec.Node.AddColumnOnce(preservationaction.FieldSipID) } } if ps := paq.predicates; len(ps) > 0 { diff --git a/internal/persistence/ent/db/preservationaction_update.go b/internal/persistence/ent/db/preservationaction_update.go index e0f145d3e..88cf778ff 100644 --- a/internal/persistence/ent/db/preservationaction_update.go +++ b/internal/persistence/ent/db/preservationaction_update.go @@ -11,10 +11,10 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" ) // PreservationActionUpdate is the builder 
for updating PreservationAction entities. @@ -126,23 +126,23 @@ func (pau *PreservationActionUpdate) ClearCompletedAt() *PreservationActionUpdat return pau } -// SetPackageID sets the "package_id" field. -func (pau *PreservationActionUpdate) SetPackageID(i int) *PreservationActionUpdate { - pau.mutation.SetPackageID(i) +// SetSipID sets the "sip_id" field. +func (pau *PreservationActionUpdate) SetSipID(i int) *PreservationActionUpdate { + pau.mutation.SetSipID(i) return pau } -// SetNillablePackageID sets the "package_id" field if the given value is not nil. -func (pau *PreservationActionUpdate) SetNillablePackageID(i *int) *PreservationActionUpdate { +// SetNillableSipID sets the "sip_id" field if the given value is not nil. +func (pau *PreservationActionUpdate) SetNillableSipID(i *int) *PreservationActionUpdate { if i != nil { - pau.SetPackageID(*i) + pau.SetSipID(*i) } return pau } -// SetPackage sets the "package" edge to the Pkg entity. -func (pau *PreservationActionUpdate) SetPackage(p *Pkg) *PreservationActionUpdate { - return pau.SetPackageID(p.ID) +// SetSip sets the "sip" edge to the SIP entity. +func (pau *PreservationActionUpdate) SetSip(s *SIP) *PreservationActionUpdate { + return pau.SetSipID(s.ID) } // AddTaskIDs adds the "tasks" edge to the PreservationTask entity by IDs. @@ -165,9 +165,9 @@ func (pau *PreservationActionUpdate) Mutation() *PreservationActionMutation { return pau.mutation } -// ClearPackage clears the "package" edge to the Pkg entity. -func (pau *PreservationActionUpdate) ClearPackage() *PreservationActionUpdate { - pau.mutation.ClearPackage() +// ClearSip clears the "sip" edge to the SIP entity. +func (pau *PreservationActionUpdate) ClearSip() *PreservationActionUpdate { + pau.mutation.ClearSip() return pau } @@ -221,13 +221,13 @@ func (pau *PreservationActionUpdate) ExecX(ctx context.Context) { // check runs all checks and user-defined validators on the builder. 
func (pau *PreservationActionUpdate) check() error { - if v, ok := pau.mutation.PackageID(); ok { - if err := preservationaction.PackageIDValidator(v); err != nil { - return &ValidationError{Name: "package_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.package_id": %w`, err)} + if v, ok := pau.mutation.SipID(); ok { + if err := preservationaction.SipIDValidator(v); err != nil { + return &ValidationError{Name: "sip_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.sip_id": %w`, err)} } } - if pau.mutation.PackageCleared() && len(pau.mutation.PackageIDs()) > 0 { - return errors.New(`db: clearing a required unique edge "PreservationAction.package"`) + if pau.mutation.SipCleared() && len(pau.mutation.SipIDs()) > 0 { + return errors.New(`db: clearing a required unique edge "PreservationAction.sip"`) } return nil } @@ -271,28 +271,28 @@ func (pau *PreservationActionUpdate) sqlSave(ctx context.Context) (n int, err er if pau.mutation.CompletedAtCleared() { _spec.ClearField(preservationaction.FieldCompletedAt, field.TypeTime) } - if pau.mutation.PackageCleared() { + if pau.mutation.SipCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, - Table: preservationaction.PackageTable, - Columns: []string{preservationaction.PackageColumn}, + Table: preservationaction.SipTable, + Columns: []string{preservationaction.SipColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pau.mutation.PackageIDs(); len(nodes) > 0 { + if nodes := pau.mutation.SipIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, - Table: preservationaction.PackageTable, - Columns: []string{preservationaction.PackageColumn}, + Table: preservationaction.SipTable, + Columns: []string{preservationaction.SipColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -461,23 +461,23 @@ func (pauo *PreservationActionUpdateOne) ClearCompletedAt() *PreservationActionU return pauo } -// SetPackageID sets the "package_id" field. -func (pauo *PreservationActionUpdateOne) SetPackageID(i int) *PreservationActionUpdateOne { - pauo.mutation.SetPackageID(i) +// SetSipID sets the "sip_id" field. +func (pauo *PreservationActionUpdateOne) SetSipID(i int) *PreservationActionUpdateOne { + pauo.mutation.SetSipID(i) return pauo } -// SetNillablePackageID sets the "package_id" field if the given value is not nil. -func (pauo *PreservationActionUpdateOne) SetNillablePackageID(i *int) *PreservationActionUpdateOne { +// SetNillableSipID sets the "sip_id" field if the given value is not nil. +func (pauo *PreservationActionUpdateOne) SetNillableSipID(i *int) *PreservationActionUpdateOne { if i != nil { - pauo.SetPackageID(*i) + pauo.SetSipID(*i) } return pauo } -// SetPackage sets the "package" edge to the Pkg entity. -func (pauo *PreservationActionUpdateOne) SetPackage(p *Pkg) *PreservationActionUpdateOne { - return pauo.SetPackageID(p.ID) +// SetSip sets the "sip" edge to the SIP entity. +func (pauo *PreservationActionUpdateOne) SetSip(s *SIP) *PreservationActionUpdateOne { + return pauo.SetSipID(s.ID) } // AddTaskIDs adds the "tasks" edge to the PreservationTask entity by IDs. 
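// --- Usage sketch (illustrative only; not part of the generated diff) ---
// Re-pointing an existing action at a different SIP with the renamed update
// builder, then reading it back with the eager-loaded edge. The *db.Client
// value, the integer IDs, and the "example" package are assumptions; the
// sketch relies on SetSipID and WithSip as renamed in this diff plus standard
// ent client methods (UpdateOneID, Only).
package example

import (
	"context"

	"github.com/artefactual-sdps/enduro/internal/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction"
)

func moveActionToSIP(ctx context.Context, client *db.Client, actionID, sipID int) (*db.PreservationAction, error) {
	// SetSipID replaces the former SetPackageID on the UpdateOne builder.
	if _, err := client.PreservationAction.UpdateOneID(actionID).
		SetSipID(sipID).
		Save(ctx); err != nil {
		return nil, err
	}
	// WithSip replaces WithPackage: eager-load the parent SIP on the result.
	return client.PreservationAction.Query().
		Where(preservationaction.ID(actionID)).
		WithSip().
		Only(ctx)
}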
@@ -500,9 +500,9 @@ func (pauo *PreservationActionUpdateOne) Mutation() *PreservationActionMutation return pauo.mutation } -// ClearPackage clears the "package" edge to the Pkg entity. -func (pauo *PreservationActionUpdateOne) ClearPackage() *PreservationActionUpdateOne { - pauo.mutation.ClearPackage() +// ClearSip clears the "sip" edge to the SIP entity. +func (pauo *PreservationActionUpdateOne) ClearSip() *PreservationActionUpdateOne { + pauo.mutation.ClearSip() return pauo } @@ -569,13 +569,13 @@ func (pauo *PreservationActionUpdateOne) ExecX(ctx context.Context) { // check runs all checks and user-defined validators on the builder. func (pauo *PreservationActionUpdateOne) check() error { - if v, ok := pauo.mutation.PackageID(); ok { - if err := preservationaction.PackageIDValidator(v); err != nil { - return &ValidationError{Name: "package_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.package_id": %w`, err)} + if v, ok := pauo.mutation.SipID(); ok { + if err := preservationaction.SipIDValidator(v); err != nil { + return &ValidationError{Name: "sip_id", err: fmt.Errorf(`db: validator failed for field "PreservationAction.sip_id": %w`, err)} } } - if pauo.mutation.PackageCleared() && len(pauo.mutation.PackageIDs()) > 0 { - return errors.New(`db: clearing a required unique edge "PreservationAction.package"`) + if pauo.mutation.SipCleared() && len(pauo.mutation.SipIDs()) > 0 { + return errors.New(`db: clearing a required unique edge "PreservationAction.sip"`) } return nil } @@ -636,28 +636,28 @@ func (pauo *PreservationActionUpdateOne) sqlSave(ctx context.Context) (_node *Pr if pauo.mutation.CompletedAtCleared() { _spec.ClearField(preservationaction.FieldCompletedAt, field.TypeTime) } - if pauo.mutation.PackageCleared() { + if pauo.mutation.SipCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, - Table: preservationaction.PackageTable, - Columns: []string{preservationaction.PackageColumn}, + Table: preservationaction.SipTable, + Columns: []string{preservationaction.SipColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := pauo.mutation.PackageIDs(); len(nodes) > 0 { + if nodes := pauo.mutation.SipIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, Inverse: true, - Table: preservationaction.PackageTable, - Columns: []string{preservationaction.PackageColumn}, + Table: preservationaction.SipTable, + Columns: []string{preservationaction.SipColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt), }, } for _, k := range nodes { diff --git a/internal/persistence/ent/db/runtime.go b/internal/persistence/ent/db/runtime.go index e34e974d8..4a7bf71f3 100644 --- a/internal/persistence/ent/db/runtime.go +++ b/internal/persistence/ent/db/runtime.go @@ -5,9 +5,9 @@ package db import ( "time" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationtask" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" "github.com/artefactual-sdps/enduro/internal/persistence/ent/schema" ) @@ -15,22 +15,22 @@ import ( // (default values, validators, hooks and 
policies) and stitches it // to their package variables. func init() { - pkgFields := schema.Pkg{}.Fields() - _ = pkgFields - // pkgDescCreatedAt is the schema descriptor for created_at field. - pkgDescCreatedAt := pkgFields[6].Descriptor() - // pkg.DefaultCreatedAt holds the default value on creation for the created_at field. - pkg.DefaultCreatedAt = pkgDescCreatedAt.Default.(func() time.Time) preservationactionFields := schema.PreservationAction{}.Fields() _ = preservationactionFields - // preservationactionDescPackageID is the schema descriptor for package_id field. - preservationactionDescPackageID := preservationactionFields[5].Descriptor() - // preservationaction.PackageIDValidator is a validator for the "package_id" field. It is called by the builders before save. - preservationaction.PackageIDValidator = preservationactionDescPackageID.Validators[0].(func(int) error) + // preservationactionDescSipID is the schema descriptor for sip_id field. + preservationactionDescSipID := preservationactionFields[5].Descriptor() + // preservationaction.SipIDValidator is a validator for the "sip_id" field. It is called by the builders before save. + preservationaction.SipIDValidator = preservationactionDescSipID.Validators[0].(func(int) error) preservationtaskFields := schema.PreservationTask{}.Fields() _ = preservationtaskFields // preservationtaskDescPreservationActionID is the schema descriptor for preservation_action_id field. preservationtaskDescPreservationActionID := preservationtaskFields[6].Descriptor() // preservationtask.PreservationActionIDValidator is a validator for the "preservation_action_id" field. It is called by the builders before save. preservationtask.PreservationActionIDValidator = preservationtaskDescPreservationActionID.Validators[0].(func(int) error) + sipFields := schema.SIP{}.Fields() + _ = sipFields + // sipDescCreatedAt is the schema descriptor for created_at field. + sipDescCreatedAt := sipFields[6].Descriptor() + // sip.DefaultCreatedAt holds the default value on creation for the created_at field. + sip.DefaultCreatedAt = sipDescCreatedAt.Default.(func() time.Time) } diff --git a/internal/persistence/ent/db/pkg.go b/internal/persistence/ent/db/sip.go similarity index 66% rename from internal/persistence/ent/db/pkg.go rename to internal/persistence/ent/db/sip.go index bb7cdfc02..4a86bb3e6 100644 --- a/internal/persistence/ent/db/pkg.go +++ b/internal/persistence/ent/db/sip.go @@ -9,12 +9,12 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" - "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/pkg" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" "github.com/google/uuid" ) -// Pkg is the model entity for the Pkg schema. -type Pkg struct { +// SIP is the model entity for the SIP schema. +type SIP struct { config `json:"-"` // ID of the ent. ID int `json:"id,omitempty"` @@ -37,13 +37,13 @@ type Pkg struct { // CompletedAt holds the value of the "completed_at" field. CompletedAt time.Time `json:"completed_at,omitempty"` // Edges holds the relations/edges for other nodes in the graph. - // The values are being populated by the PkgQuery when eager-loading is set. - Edges PkgEdges `json:"edges"` + // The values are being populated by the SIPQuery when eager-loading is set. + Edges SIPEdges `json:"edges"` selectValues sql.SelectValues } -// PkgEdges holds the relations/edges for other nodes in the graph. -type PkgEdges struct { +// SIPEdges holds the relations/edges for other nodes in the graph. 
+type SIPEdges struct { // PreservationActions holds the value of the preservation_actions edge. PreservationActions []*PreservationAction `json:"preservation_actions,omitempty"` // loadedTypes holds the information for reporting if a @@ -53,7 +53,7 @@ type PkgEdges struct { // PreservationActionsOrErr returns the PreservationActions value or an error if the edge // was not loaded in eager-loading. -func (e PkgEdges) PreservationActionsOrErr() ([]*PreservationAction, error) { +func (e SIPEdges) PreservationActionsOrErr() ([]*PreservationAction, error) { if e.loadedTypes[0] { return e.PreservationActions, nil } @@ -61,17 +61,17 @@ func (e PkgEdges) PreservationActionsOrErr() ([]*PreservationAction, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Pkg) scanValues(columns []string) ([]any, error) { +func (*SIP) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case pkg.FieldID, pkg.FieldStatus: + case sip.FieldID, sip.FieldStatus: values[i] = new(sql.NullInt64) - case pkg.FieldName, pkg.FieldWorkflowID: + case sip.FieldName, sip.FieldWorkflowID: values[i] = new(sql.NullString) - case pkg.FieldCreatedAt, pkg.FieldStartedAt, pkg.FieldCompletedAt: + case sip.FieldCreatedAt, sip.FieldStartedAt, sip.FieldCompletedAt: values[i] = new(sql.NullTime) - case pkg.FieldRunID, pkg.FieldAipID, pkg.FieldLocationID: + case sip.FieldRunID, sip.FieldAipID, sip.FieldLocationID: values[i] = new(uuid.UUID) default: values[i] = new(sql.UnknownType) @@ -81,143 +81,143 @@ func (*Pkg) scanValues(columns []string) ([]any, error) { } // assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the Pkg fields. -func (pk *Pkg) assignValues(columns []string, values []any) error { +// to the SIP fields. 
+func (s *SIP) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } for i := range columns { switch columns[i] { - case pkg.FieldID: + case sip.FieldID: value, ok := values[i].(*sql.NullInt64) if !ok { return fmt.Errorf("unexpected type %T for field id", value) } - pk.ID = int(value.Int64) - case pkg.FieldName: + s.ID = int(value.Int64) + case sip.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - pk.Name = value.String + s.Name = value.String } - case pkg.FieldWorkflowID: + case sip.FieldWorkflowID: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field workflow_id", values[i]) } else if value.Valid { - pk.WorkflowID = value.String + s.WorkflowID = value.String } - case pkg.FieldRunID: + case sip.FieldRunID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field run_id", values[i]) } else if value != nil { - pk.RunID = *value + s.RunID = *value } - case pkg.FieldAipID: + case sip.FieldAipID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field aip_id", values[i]) } else if value != nil { - pk.AipID = *value + s.AipID = *value } - case pkg.FieldLocationID: + case sip.FieldLocationID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field location_id", values[i]) } else if value != nil { - pk.LocationID = *value + s.LocationID = *value } - case pkg.FieldStatus: + case sip.FieldStatus: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field status", values[i]) } else if value.Valid { - pk.Status = int8(value.Int64) + s.Status = int8(value.Int64) } - case pkg.FieldCreatedAt: + case sip.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - pk.CreatedAt = value.Time + s.CreatedAt = value.Time } - case pkg.FieldStartedAt: + case sip.FieldStartedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field started_at", values[i]) } else if value.Valid { - pk.StartedAt = value.Time + s.StartedAt = value.Time } - case pkg.FieldCompletedAt: + case sip.FieldCompletedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field completed_at", values[i]) } else if value.Valid { - pk.CompletedAt = value.Time + s.CompletedAt = value.Time } default: - pk.selectValues.Set(columns[i], values[i]) + s.selectValues.Set(columns[i], values[i]) } } return nil } -// Value returns the ent.Value that was dynamically selected and assigned to the Pkg. +// Value returns the ent.Value that was dynamically selected and assigned to the SIP. // This includes values selected through modifiers, order, etc. -func (pk *Pkg) Value(name string) (ent.Value, error) { - return pk.selectValues.Get(name) +func (s *SIP) Value(name string) (ent.Value, error) { + return s.selectValues.Get(name) } -// QueryPreservationActions queries the "preservation_actions" edge of the Pkg entity. -func (pk *Pkg) QueryPreservationActions() *PreservationActionQuery { - return NewPkgClient(pk.config).QueryPreservationActions(pk) +// QueryPreservationActions queries the "preservation_actions" edge of the SIP entity. 
+func (s *SIP) QueryPreservationActions() *PreservationActionQuery { + return NewSIPClient(s.config).QueryPreservationActions(s) } -// Update returns a builder for updating this Pkg. -// Note that you need to call Pkg.Unwrap() before calling this method if this Pkg +// Update returns a builder for updating this SIP. +// Note that you need to call SIP.Unwrap() before calling this method if this SIP // was returned from a transaction, and the transaction was committed or rolled back. -func (pk *Pkg) Update() *PkgUpdateOne { - return NewPkgClient(pk.config).UpdateOne(pk) +func (s *SIP) Update() *SIPUpdateOne { + return NewSIPClient(s.config).UpdateOne(s) } -// Unwrap unwraps the Pkg entity that was returned from a transaction after it was closed, +// Unwrap unwraps the SIP entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (pk *Pkg) Unwrap() *Pkg { - _tx, ok := pk.config.driver.(*txDriver) +func (s *SIP) Unwrap() *SIP { + _tx, ok := s.config.driver.(*txDriver) if !ok { - panic("db: Pkg is not a transactional entity") + panic("db: SIP is not a transactional entity") } - pk.config.driver = _tx.drv - return pk + s.config.driver = _tx.drv + return s } // String implements the fmt.Stringer. -func (pk *Pkg) String() string { +func (s *SIP) String() string { var builder strings.Builder - builder.WriteString("Pkg(") - builder.WriteString(fmt.Sprintf("id=%v, ", pk.ID)) + builder.WriteString("SIP(") + builder.WriteString(fmt.Sprintf("id=%v, ", s.ID)) builder.WriteString("name=") - builder.WriteString(pk.Name) + builder.WriteString(s.Name) builder.WriteString(", ") builder.WriteString("workflow_id=") - builder.WriteString(pk.WorkflowID) + builder.WriteString(s.WorkflowID) builder.WriteString(", ") builder.WriteString("run_id=") - builder.WriteString(fmt.Sprintf("%v", pk.RunID)) + builder.WriteString(fmt.Sprintf("%v", s.RunID)) builder.WriteString(", ") builder.WriteString("aip_id=") - builder.WriteString(fmt.Sprintf("%v", pk.AipID)) + builder.WriteString(fmt.Sprintf("%v", s.AipID)) builder.WriteString(", ") builder.WriteString("location_id=") - builder.WriteString(fmt.Sprintf("%v", pk.LocationID)) + builder.WriteString(fmt.Sprintf("%v", s.LocationID)) builder.WriteString(", ") builder.WriteString("status=") - builder.WriteString(fmt.Sprintf("%v", pk.Status)) + builder.WriteString(fmt.Sprintf("%v", s.Status)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(pk.CreatedAt.Format(time.ANSIC)) + builder.WriteString(s.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("started_at=") - builder.WriteString(pk.StartedAt.Format(time.ANSIC)) + builder.WriteString(s.StartedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("completed_at=") - builder.WriteString(pk.CompletedAt.Format(time.ANSIC)) + builder.WriteString(s.CompletedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } -// Pkgs is a parsable slice of Pkg. -type Pkgs []*Pkg +// SIPs is a parsable slice of SIP. +type SIPs []*SIP diff --git a/internal/persistence/ent/db/pkg/pkg.go b/internal/persistence/ent/db/sip/sip.go similarity index 93% rename from internal/persistence/ent/db/pkg/pkg.go rename to internal/persistence/ent/db/sip/sip.go index f70bf13aa..79196ae3a 100644 --- a/internal/persistence/ent/db/pkg/pkg.go +++ b/internal/persistence/ent/db/sip/sip.go @@ -1,6 +1,6 @@ // Code generated by ent, DO NOT EDIT. 
-package pkg +package sip import ( "time" @@ -10,8 +10,8 @@ import ( ) const ( - // Label holds the string label denoting the pkg type in the database. - Label = "pkg" + // Label holds the string label denoting the sip type in the database. + Label = "sip" // FieldID holds the string denoting the id field in the database. FieldID = "id" // FieldName holds the string denoting the name field in the database. @@ -34,18 +34,18 @@ const ( FieldCompletedAt = "completed_at" // EdgePreservationActions holds the string denoting the preservation_actions edge name in mutations. EdgePreservationActions = "preservation_actions" - // Table holds the table name of the pkg in the database. - Table = "package" + // Table holds the table name of the sip in the database. + Table = "sip" // PreservationActionsTable is the table that holds the preservation_actions relation/edge. PreservationActionsTable = "preservation_action" // PreservationActionsInverseTable is the table name for the PreservationAction entity. // It exists in this package in order to avoid circular dependency with the "preservationaction" package. PreservationActionsInverseTable = "preservation_action" // PreservationActionsColumn is the table column denoting the preservation_actions relation/edge. - PreservationActionsColumn = "package_id" + PreservationActionsColumn = "sip_id" ) -// Columns holds all SQL columns for pkg fields. +// Columns holds all SQL columns for sip fields. var Columns = []string{ FieldID, FieldName, @@ -74,7 +74,7 @@ var ( DefaultCreatedAt func() time.Time ) -// OrderOption defines the ordering options for the Pkg queries. +// OrderOption defines the ordering options for the SIP queries. type OrderOption func(*sql.Selector) // ByID orders the results by the id field. diff --git a/internal/persistence/ent/db/sip/where.go b/internal/persistence/ent/db/sip/where.go new file mode 100644 index 000000000..f8cb98b00 --- /dev/null +++ b/internal/persistence/ent/db/sip/where.go @@ -0,0 +1,590 @@ +// Code generated by ent, DO NOT EDIT. + +package sip + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id int) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldName, v)) +} + +// WorkflowID applies equality check predicate on the "workflow_id" field. It's identical to WorkflowIDEQ. +func WorkflowID(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldWorkflowID, v)) +} + +// RunID applies equality check predicate on the "run_id" field. It's identical to RunIDEQ. +func RunID(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldRunID, v)) +} + +// AipID applies equality check predicate on the "aip_id" field. It's identical to AipIDEQ. +func AipID(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldAipID, v)) +} + +// LocationID applies equality check predicate on the "location_id" field. It's identical to LocationIDEQ. +func LocationID(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldLocationID, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v int8) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldCreatedAt, v)) +} + +// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. +func StartedAt(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldStartedAt, v)) +} + +// CompletedAt applies equality check predicate on the "completed_at" field. It's identical to CompletedAtEQ. +func CompletedAt(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldCompletedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.SIP { + return predicate.SIP(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. 
+func NameHasPrefix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldContainsFold(FieldName, v)) +} + +// WorkflowIDEQ applies the EQ predicate on the "workflow_id" field. +func WorkflowIDEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldWorkflowID, v)) +} + +// WorkflowIDNEQ applies the NEQ predicate on the "workflow_id" field. +func WorkflowIDNEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldWorkflowID, v)) +} + +// WorkflowIDIn applies the In predicate on the "workflow_id" field. +func WorkflowIDIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldWorkflowID, vs...)) +} + +// WorkflowIDNotIn applies the NotIn predicate on the "workflow_id" field. +func WorkflowIDNotIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldWorkflowID, vs...)) +} + +// WorkflowIDGT applies the GT predicate on the "workflow_id" field. +func WorkflowIDGT(v string) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldWorkflowID, v)) +} + +// WorkflowIDGTE applies the GTE predicate on the "workflow_id" field. +func WorkflowIDGTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldWorkflowID, v)) +} + +// WorkflowIDLT applies the LT predicate on the "workflow_id" field. +func WorkflowIDLT(v string) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldWorkflowID, v)) +} + +// WorkflowIDLTE applies the LTE predicate on the "workflow_id" field. +func WorkflowIDLTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldWorkflowID, v)) +} + +// WorkflowIDContains applies the Contains predicate on the "workflow_id" field. +func WorkflowIDContains(v string) predicate.SIP { + return predicate.SIP(sql.FieldContains(FieldWorkflowID, v)) +} + +// WorkflowIDHasPrefix applies the HasPrefix predicate on the "workflow_id" field. +func WorkflowIDHasPrefix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasPrefix(FieldWorkflowID, v)) +} + +// WorkflowIDHasSuffix applies the HasSuffix predicate on the "workflow_id" field. +func WorkflowIDHasSuffix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasSuffix(FieldWorkflowID, v)) +} + +// WorkflowIDEqualFold applies the EqualFold predicate on the "workflow_id" field. +func WorkflowIDEqualFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldEqualFold(FieldWorkflowID, v)) +} + +// WorkflowIDContainsFold applies the ContainsFold predicate on the "workflow_id" field. +func WorkflowIDContainsFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldContainsFold(FieldWorkflowID, v)) +} + +// RunIDEQ applies the EQ predicate on the "run_id" field. +func RunIDEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldRunID, v)) +} + +// RunIDNEQ applies the NEQ predicate on the "run_id" field. +func RunIDNEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldRunID, v)) +} + +// RunIDIn applies the In predicate on the "run_id" field. 
+func RunIDIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldRunID, vs...)) +} + +// RunIDNotIn applies the NotIn predicate on the "run_id" field. +func RunIDNotIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldRunID, vs...)) +} + +// RunIDGT applies the GT predicate on the "run_id" field. +func RunIDGT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldRunID, v)) +} + +// RunIDGTE applies the GTE predicate on the "run_id" field. +func RunIDGTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldRunID, v)) +} + +// RunIDLT applies the LT predicate on the "run_id" field. +func RunIDLT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldRunID, v)) +} + +// RunIDLTE applies the LTE predicate on the "run_id" field. +func RunIDLTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldRunID, v)) +} + +// AipIDEQ applies the EQ predicate on the "aip_id" field. +func AipIDEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldAipID, v)) +} + +// AipIDNEQ applies the NEQ predicate on the "aip_id" field. +func AipIDNEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldAipID, v)) +} + +// AipIDIn applies the In predicate on the "aip_id" field. +func AipIDIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldAipID, vs...)) +} + +// AipIDNotIn applies the NotIn predicate on the "aip_id" field. +func AipIDNotIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldAipID, vs...)) +} + +// AipIDGT applies the GT predicate on the "aip_id" field. +func AipIDGT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldAipID, v)) +} + +// AipIDGTE applies the GTE predicate on the "aip_id" field. +func AipIDGTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldAipID, v)) +} + +// AipIDLT applies the LT predicate on the "aip_id" field. +func AipIDLT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldAipID, v)) +} + +// AipIDLTE applies the LTE predicate on the "aip_id" field. +func AipIDLTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldAipID, v)) +} + +// AipIDIsNil applies the IsNil predicate on the "aip_id" field. +func AipIDIsNil() predicate.SIP { + return predicate.SIP(sql.FieldIsNull(FieldAipID)) +} + +// AipIDNotNil applies the NotNil predicate on the "aip_id" field. +func AipIDNotNil() predicate.SIP { + return predicate.SIP(sql.FieldNotNull(FieldAipID)) +} + +// LocationIDEQ applies the EQ predicate on the "location_id" field. +func LocationIDEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldLocationID, v)) +} + +// LocationIDNEQ applies the NEQ predicate on the "location_id" field. +func LocationIDNEQ(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldLocationID, v)) +} + +// LocationIDIn applies the In predicate on the "location_id" field. +func LocationIDIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldLocationID, vs...)) +} + +// LocationIDNotIn applies the NotIn predicate on the "location_id" field. +func LocationIDNotIn(vs ...uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldLocationID, vs...)) +} + +// LocationIDGT applies the GT predicate on the "location_id" field. 
+func LocationIDGT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldLocationID, v)) +} + +// LocationIDGTE applies the GTE predicate on the "location_id" field. +func LocationIDGTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldLocationID, v)) +} + +// LocationIDLT applies the LT predicate on the "location_id" field. +func LocationIDLT(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldLocationID, v)) +} + +// LocationIDLTE applies the LTE predicate on the "location_id" field. +func LocationIDLTE(v uuid.UUID) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldLocationID, v)) +} + +// LocationIDIsNil applies the IsNil predicate on the "location_id" field. +func LocationIDIsNil() predicate.SIP { + return predicate.SIP(sql.FieldIsNull(FieldLocationID)) +} + +// LocationIDNotNil applies the NotNil predicate on the "location_id" field. +func LocationIDNotNil() predicate.SIP { + return predicate.SIP(sql.FieldNotNull(FieldLocationID)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v int8) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v int8) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...int8) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...int8) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v int8) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v int8) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v int8) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v int8) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
+func CreatedAtLT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldCreatedAt, v)) +} + +// StartedAtEQ applies the EQ predicate on the "started_at" field. +func StartedAtEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldStartedAt, v)) +} + +// StartedAtNEQ applies the NEQ predicate on the "started_at" field. +func StartedAtNEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldStartedAt, v)) +} + +// StartedAtIn applies the In predicate on the "started_at" field. +func StartedAtIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldStartedAt, vs...)) +} + +// StartedAtNotIn applies the NotIn predicate on the "started_at" field. +func StartedAtNotIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldStartedAt, vs...)) +} + +// StartedAtGT applies the GT predicate on the "started_at" field. +func StartedAtGT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldStartedAt, v)) +} + +// StartedAtGTE applies the GTE predicate on the "started_at" field. +func StartedAtGTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldStartedAt, v)) +} + +// StartedAtLT applies the LT predicate on the "started_at" field. +func StartedAtLT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldStartedAt, v)) +} + +// StartedAtLTE applies the LTE predicate on the "started_at" field. +func StartedAtLTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldStartedAt, v)) +} + +// StartedAtIsNil applies the IsNil predicate on the "started_at" field. +func StartedAtIsNil() predicate.SIP { + return predicate.SIP(sql.FieldIsNull(FieldStartedAt)) +} + +// StartedAtNotNil applies the NotNil predicate on the "started_at" field. +func StartedAtNotNil() predicate.SIP { + return predicate.SIP(sql.FieldNotNull(FieldStartedAt)) +} + +// CompletedAtEQ applies the EQ predicate on the "completed_at" field. +func CompletedAtEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldCompletedAt, v)) +} + +// CompletedAtNEQ applies the NEQ predicate on the "completed_at" field. +func CompletedAtNEQ(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldCompletedAt, v)) +} + +// CompletedAtIn applies the In predicate on the "completed_at" field. +func CompletedAtIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldCompletedAt, vs...)) +} + +// CompletedAtNotIn applies the NotIn predicate on the "completed_at" field. +func CompletedAtNotIn(vs ...time.Time) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldCompletedAt, vs...)) +} + +// CompletedAtGT applies the GT predicate on the "completed_at" field. +func CompletedAtGT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldCompletedAt, v)) +} + +// CompletedAtGTE applies the GTE predicate on the "completed_at" field. +func CompletedAtGTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldCompletedAt, v)) +} + +// CompletedAtLT applies the LT predicate on the "completed_at" field. +func CompletedAtLT(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldCompletedAt, v)) +} + +// CompletedAtLTE applies the LTE predicate on the "completed_at" field. 
+func CompletedAtLTE(v time.Time) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldCompletedAt, v)) +} + +// CompletedAtIsNil applies the IsNil predicate on the "completed_at" field. +func CompletedAtIsNil() predicate.SIP { + return predicate.SIP(sql.FieldIsNull(FieldCompletedAt)) +} + +// CompletedAtNotNil applies the NotNil predicate on the "completed_at" field. +func CompletedAtNotNil() predicate.SIP { + return predicate.SIP(sql.FieldNotNull(FieldCompletedAt)) +} + +// HasPreservationActions applies the HasEdge predicate on the "preservation_actions" edge. +func HasPreservationActions() predicate.SIP { + return predicate.SIP(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PreservationActionsTable, PreservationActionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPreservationActionsWith applies the HasEdge predicate on the "preservation_actions" edge with a given conditions (other predicates). +func HasPreservationActionsWith(preds ...predicate.PreservationAction) predicate.SIP { + return predicate.SIP(func(s *sql.Selector) { + step := newPreservationActionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.SIP) predicate.SIP { + return predicate.SIP(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.SIP) predicate.SIP { + return predicate.SIP(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.SIP) predicate.SIP { + return predicate.SIP(sql.NotPredicates(p)) +} diff --git a/internal/persistence/ent/db/sip_create.go b/internal/persistence/ent/db/sip_create.go new file mode 100644 index 000000000..5c4e1ba28 --- /dev/null +++ b/internal/persistence/ent/db/sip_create.go @@ -0,0 +1,359 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" + "github.com/google/uuid" +) + +// SIPCreate is the builder for creating a SIP entity. +type SIPCreate struct { + config + mutation *SIPMutation + hooks []Hook +} + +// SetName sets the "name" field. +func (sc *SIPCreate) SetName(s string) *SIPCreate { + sc.mutation.SetName(s) + return sc +} + +// SetWorkflowID sets the "workflow_id" field. +func (sc *SIPCreate) SetWorkflowID(s string) *SIPCreate { + sc.mutation.SetWorkflowID(s) + return sc +} + +// SetRunID sets the "run_id" field. +func (sc *SIPCreate) SetRunID(u uuid.UUID) *SIPCreate { + sc.mutation.SetRunID(u) + return sc +} + +// SetAipID sets the "aip_id" field. +func (sc *SIPCreate) SetAipID(u uuid.UUID) *SIPCreate { + sc.mutation.SetAipID(u) + return sc +} + +// SetNillableAipID sets the "aip_id" field if the given value is not nil. +func (sc *SIPCreate) SetNillableAipID(u *uuid.UUID) *SIPCreate { + if u != nil { + sc.SetAipID(*u) + } + return sc +} + +// SetLocationID sets the "location_id" field. +func (sc *SIPCreate) SetLocationID(u uuid.UUID) *SIPCreate { + sc.mutation.SetLocationID(u) + return sc +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. 
+func (sc *SIPCreate) SetNillableLocationID(u *uuid.UUID) *SIPCreate { + if u != nil { + sc.SetLocationID(*u) + } + return sc +} + +// SetStatus sets the "status" field. +func (sc *SIPCreate) SetStatus(i int8) *SIPCreate { + sc.mutation.SetStatus(i) + return sc +} + +// SetCreatedAt sets the "created_at" field. +func (sc *SIPCreate) SetCreatedAt(t time.Time) *SIPCreate { + sc.mutation.SetCreatedAt(t) + return sc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (sc *SIPCreate) SetNillableCreatedAt(t *time.Time) *SIPCreate { + if t != nil { + sc.SetCreatedAt(*t) + } + return sc +} + +// SetStartedAt sets the "started_at" field. +func (sc *SIPCreate) SetStartedAt(t time.Time) *SIPCreate { + sc.mutation.SetStartedAt(t) + return sc +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (sc *SIPCreate) SetNillableStartedAt(t *time.Time) *SIPCreate { + if t != nil { + sc.SetStartedAt(*t) + } + return sc +} + +// SetCompletedAt sets the "completed_at" field. +func (sc *SIPCreate) SetCompletedAt(t time.Time) *SIPCreate { + sc.mutation.SetCompletedAt(t) + return sc +} + +// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. +func (sc *SIPCreate) SetNillableCompletedAt(t *time.Time) *SIPCreate { + if t != nil { + sc.SetCompletedAt(*t) + } + return sc +} + +// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. +func (sc *SIPCreate) AddPreservationActionIDs(ids ...int) *SIPCreate { + sc.mutation.AddPreservationActionIDs(ids...) + return sc +} + +// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. +func (sc *SIPCreate) AddPreservationActions(p ...*PreservationAction) *SIPCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return sc.AddPreservationActionIDs(ids...) +} + +// Mutation returns the SIPMutation object of the builder. +func (sc *SIPCreate) Mutation() *SIPMutation { + return sc.mutation +} + +// Save creates the SIP in the database. +func (sc *SIPCreate) Save(ctx context.Context) (*SIP, error) { + sc.defaults() + return withHooks(ctx, sc.sqlSave, sc.mutation, sc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (sc *SIPCreate) SaveX(ctx context.Context) *SIP { + v, err := sc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (sc *SIPCreate) Exec(ctx context.Context) error { + _, err := sc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sc *SIPCreate) ExecX(ctx context.Context) { + if err := sc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (sc *SIPCreate) defaults() { + if _, ok := sc.mutation.CreatedAt(); !ok { + v := sip.DefaultCreatedAt() + sc.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (sc *SIPCreate) check() error { + if _, ok := sc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`db: missing required field "SIP.name"`)} + } + if _, ok := sc.mutation.WorkflowID(); !ok { + return &ValidationError{Name: "workflow_id", err: errors.New(`db: missing required field "SIP.workflow_id"`)} + } + if _, ok := sc.mutation.RunID(); !ok { + return &ValidationError{Name: "run_id", err: errors.New(`db: missing required field "SIP.run_id"`)} + } + if _, ok := sc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`db: missing required field "SIP.status"`)} + } + if _, ok := sc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "SIP.created_at"`)} + } + return nil +} + +func (sc *SIPCreate) sqlSave(ctx context.Context) (*SIP, error) { + if err := sc.check(); err != nil { + return nil, err + } + _node, _spec := sc.createSpec() + if err := sqlgraph.CreateNode(ctx, sc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + sc.mutation.id = &_node.ID + sc.mutation.done = true + return _node, nil +} + +func (sc *SIPCreate) createSpec() (*SIP, *sqlgraph.CreateSpec) { + var ( + _node = &SIP{config: sc.config} + _spec = sqlgraph.NewCreateSpec(sip.Table, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + ) + if value, ok := sc.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := sc.mutation.WorkflowID(); ok { + _spec.SetField(sip.FieldWorkflowID, field.TypeString, value) + _node.WorkflowID = value + } + if value, ok := sc.mutation.RunID(); ok { + _spec.SetField(sip.FieldRunID, field.TypeUUID, value) + _node.RunID = value + } + if value, ok := sc.mutation.AipID(); ok { + _spec.SetField(sip.FieldAipID, field.TypeUUID, value) + _node.AipID = value + } + if value, ok := sc.mutation.LocationID(); ok { + _spec.SetField(sip.FieldLocationID, field.TypeUUID, value) + _node.LocationID = value + } + if value, ok := sc.mutation.Status(); ok { + _spec.SetField(sip.FieldStatus, field.TypeInt8, value) + _node.Status = value + } + if value, ok := sc.mutation.CreatedAt(); ok { + _spec.SetField(sip.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := sc.mutation.StartedAt(); ok { + _spec.SetField(sip.FieldStartedAt, field.TypeTime, value) + _node.StartedAt = value + } + if value, ok := sc.mutation.CompletedAt(); ok { + _spec.SetField(sip.FieldCompletedAt, field.TypeTime, value) + _node.CompletedAt = value + } + if nodes := sc.mutation.PreservationActionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// SIPCreateBulk is the builder for creating many SIP entities in bulk. +type SIPCreateBulk struct { + config + err error + builders []*SIPCreate +} + +// Save creates the SIP entities in the database. 
+func (scb *SIPCreateBulk) Save(ctx context.Context) ([]*SIP, error) { + if scb.err != nil { + return nil, scb.err + } + specs := make([]*sqlgraph.CreateSpec, len(scb.builders)) + nodes := make([]*SIP, len(scb.builders)) + mutators := make([]Mutator, len(scb.builders)) + for i := range scb.builders { + func(i int, root context.Context) { + builder := scb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*SIPMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, scb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, scb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, scb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (scb *SIPCreateBulk) SaveX(ctx context.Context) []*SIP { + v, err := scb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (scb *SIPCreateBulk) Exec(ctx context.Context) error { + _, err := scb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (scb *SIPCreateBulk) ExecX(ctx context.Context) { + if err := scb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/persistence/ent/db/sip_delete.go b/internal/persistence/ent/db/sip_delete.go new file mode 100644 index 000000000..9e7bfac42 --- /dev/null +++ b/internal/persistence/ent/db/sip_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" +) + +// SIPDelete is the builder for deleting a SIP entity. +type SIPDelete struct { + config + hooks []Hook + mutation *SIPMutation +} + +// Where appends a list predicates to the SIPDelete builder. +func (sd *SIPDelete) Where(ps ...predicate.SIP) *SIPDelete { + sd.mutation.Where(ps...) + return sd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (sd *SIPDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, sd.sqlExec, sd.mutation, sd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (sd *SIPDelete) ExecX(ctx context.Context) int { + n, err := sd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (sd *SIPDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(sip.Table, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + if ps := sd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + sd.mutation.done = true + return affected, err +} + +// SIPDeleteOne is the builder for deleting a single SIP entity. +type SIPDeleteOne struct { + sd *SIPDelete +} + +// Where appends a list predicates to the SIPDelete builder. +func (sdo *SIPDeleteOne) Where(ps ...predicate.SIP) *SIPDeleteOne { + sdo.sd.mutation.Where(ps...) + return sdo +} + +// Exec executes the deletion query. +func (sdo *SIPDeleteOne) Exec(ctx context.Context) error { + n, err := sdo.sd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{sip.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (sdo *SIPDeleteOne) ExecX(ctx context.Context) { + if err := sdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/persistence/ent/db/sip_query.go b/internal/persistence/ent/db/sip_query.go new file mode 100644 index 000000000..7331426e8 --- /dev/null +++ b/internal/persistence/ent/db/sip_query.go @@ -0,0 +1,608 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" +) + +// SIPQuery is the builder for querying SIP entities. +type SIPQuery struct { + config + ctx *QueryContext + order []sip.OrderOption + inters []Interceptor + predicates []predicate.SIP + withPreservationActions *PreservationActionQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the SIPQuery builder. +func (sq *SIPQuery) Where(ps ...predicate.SIP) *SIPQuery { + sq.predicates = append(sq.predicates, ps...) + return sq +} + +// Limit the number of records to be returned by this query. +func (sq *SIPQuery) Limit(limit int) *SIPQuery { + sq.ctx.Limit = &limit + return sq +} + +// Offset to start from. +func (sq *SIPQuery) Offset(offset int) *SIPQuery { + sq.ctx.Offset = &offset + return sq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (sq *SIPQuery) Unique(unique bool) *SIPQuery { + sq.ctx.Unique = &unique + return sq +} + +// Order specifies how the records should be ordered. +func (sq *SIPQuery) Order(o ...sip.OrderOption) *SIPQuery { + sq.order = append(sq.order, o...) + return sq +} + +// QueryPreservationActions chains the current query on the "preservation_actions" edge. 
+func (sq *SIPQuery) QueryPreservationActions() *PreservationActionQuery { + query := (&PreservationActionClient{config: sq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := sq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := sq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(sip.Table, sip.FieldID, selector), + sqlgraph.To(preservationaction.Table, preservationaction.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, sip.PreservationActionsTable, sip.PreservationActionsColumn), + ) + fromU = sqlgraph.SetNeighbors(sq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first SIP entity from the query. +// Returns a *NotFoundError when no SIP was found. +func (sq *SIPQuery) First(ctx context.Context) (*SIP, error) { + nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{sip.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (sq *SIPQuery) FirstX(ctx context.Context) *SIP { + node, err := sq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first SIP ID from the query. +// Returns a *NotFoundError when no SIP ID was found. +func (sq *SIPQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{sip.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (sq *SIPQuery) FirstIDX(ctx context.Context) int { + id, err := sq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single SIP entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one SIP entity is found. +// Returns a *NotFoundError when no SIP entities are found. +func (sq *SIPQuery) Only(ctx context.Context) (*SIP, error) { + nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{sip.Label} + default: + return nil, &NotSingularError{sip.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (sq *SIPQuery) OnlyX(ctx context.Context) *SIP { + node, err := sq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only SIP ID in the query. +// Returns a *NotSingularError when more than one SIP ID is found. +// Returns a *NotFoundError when no entities are found. +func (sq *SIPQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{sip.Label} + default: + err = &NotSingularError{sip.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (sq *SIPQuery) OnlyIDX(ctx context.Context) int { + id, err := sq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of SIPs. 
+func (sq *SIPQuery) All(ctx context.Context) ([]*SIP, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryAll) + if err := sq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*SIP, *SIPQuery]() + return withInterceptors[[]*SIP](ctx, sq, qr, sq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (sq *SIPQuery) AllX(ctx context.Context) []*SIP { + nodes, err := sq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of SIP IDs. +func (sq *SIPQuery) IDs(ctx context.Context) (ids []int, err error) { + if sq.ctx.Unique == nil && sq.path != nil { + sq.Unique(true) + } + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryIDs) + if err = sq.Select(sip.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (sq *SIPQuery) IDsX(ctx context.Context) []int { + ids, err := sq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (sq *SIPQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryCount) + if err := sq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, sq, querierCount[*SIPQuery](), sq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (sq *SIPQuery) CountX(ctx context.Context) int { + count, err := sq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (sq *SIPQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryExist) + switch _, err := sq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (sq *SIPQuery) ExistX(ctx context.Context) bool { + exist, err := sq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the SIPQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (sq *SIPQuery) Clone() *SIPQuery { + if sq == nil { + return nil + } + return &SIPQuery{ + config: sq.config, + ctx: sq.ctx.Clone(), + order: append([]sip.OrderOption{}, sq.order...), + inters: append([]Interceptor{}, sq.inters...), + predicates: append([]predicate.SIP{}, sq.predicates...), + withPreservationActions: sq.withPreservationActions.Clone(), + // clone intermediate query. + sql: sq.sql.Clone(), + path: sq.path, + } +} + +// WithPreservationActions tells the query-builder to eager-load the nodes that are connected to +// the "preservation_actions" edge. The optional arguments are used to configure the query builder of the edge. +func (sq *SIPQuery) WithPreservationActions(opts ...func(*PreservationActionQuery)) *SIPQuery { + query := (&PreservationActionClient{config: sq.config}).Query() + for _, opt := range opts { + opt(query) + } + sq.withPreservationActions = query + return sq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.SIP.Query(). +// GroupBy(sip.FieldName). 
+// Aggregate(db.Count()). +// Scan(ctx, &v) +func (sq *SIPQuery) GroupBy(field string, fields ...string) *SIPGroupBy { + sq.ctx.Fields = append([]string{field}, fields...) + grbuild := &SIPGroupBy{build: sq} + grbuild.flds = &sq.ctx.Fields + grbuild.label = sip.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.SIP.Query(). +// Select(sip.FieldName). +// Scan(ctx, &v) +func (sq *SIPQuery) Select(fields ...string) *SIPSelect { + sq.ctx.Fields = append(sq.ctx.Fields, fields...) + sbuild := &SIPSelect{SIPQuery: sq} + sbuild.label = sip.Label + sbuild.flds, sbuild.scan = &sq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a SIPSelect configured with the given aggregations. +func (sq *SIPQuery) Aggregate(fns ...AggregateFunc) *SIPSelect { + return sq.Select().Aggregate(fns...) +} + +func (sq *SIPQuery) prepareQuery(ctx context.Context) error { + for _, inter := range sq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, sq); err != nil { + return err + } + } + } + for _, f := range sq.ctx.Fields { + if !sip.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if sq.path != nil { + prev, err := sq.path(ctx) + if err != nil { + return err + } + sq.sql = prev + } + return nil +} + +func (sq *SIPQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*SIP, error) { + var ( + nodes = []*SIP{} + _spec = sq.querySpec() + loadedTypes = [1]bool{ + sq.withPreservationActions != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*SIP).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &SIP{config: sq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, sq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := sq.withPreservationActions; query != nil { + if err := sq.loadPreservationActions(ctx, query, nodes, + func(n *SIP) { n.Edges.PreservationActions = []*PreservationAction{} }, + func(n *SIP, e *PreservationAction) { + n.Edges.PreservationActions = append(n.Edges.PreservationActions, e) + }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (sq *SIPQuery) loadPreservationActions(ctx context.Context, query *PreservationActionQuery, nodes []*SIP, init func(*SIP), assign func(*SIP, *PreservationAction)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*SIP) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(preservationaction.FieldSipID) + } + query.Where(predicate.PreservationAction(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(sip.PreservationActionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.SipID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected 
referenced foreign-key "sip_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (sq *SIPQuery) sqlCount(ctx context.Context) (int, error) { + _spec := sq.querySpec() + _spec.Node.Columns = sq.ctx.Fields + if len(sq.ctx.Fields) > 0 { + _spec.Unique = sq.ctx.Unique != nil && *sq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, sq.driver, _spec) +} + +func (sq *SIPQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + _spec.From = sq.sql + if unique := sq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if sq.path != nil { + _spec.Unique = true + } + if fields := sq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, sip.FieldID) + for i := range fields { + if fields[i] != sip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := sq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := sq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := sq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := sq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (sq *SIPQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(sq.driver.Dialect()) + t1 := builder.Table(sip.Table) + columns := sq.ctx.Fields + if len(columns) == 0 { + columns = sip.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if sq.sql != nil { + selector = sq.sql + selector.Select(selector.Columns(columns...)...) + } + if sq.ctx.Unique != nil && *sq.ctx.Unique { + selector.Distinct() + } + for _, p := range sq.predicates { + p(selector) + } + for _, p := range sq.order { + p(selector) + } + if offset := sq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := sq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// SIPGroupBy is the group-by builder for SIP entities. +type SIPGroupBy struct { + selector + build *SIPQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (sgb *SIPGroupBy) Aggregate(fns ...AggregateFunc) *SIPGroupBy { + sgb.fns = append(sgb.fns, fns...) + return sgb +} + +// Scan applies the selector query and scans the result into the given value. +func (sgb *SIPGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sgb.build.ctx, ent.OpQueryGroupBy) + if err := sgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SIPQuery, *SIPGroupBy](ctx, sgb.build, sgb, sgb.build.inters, v) +} + +func (sgb *SIPGroupBy) sqlScan(ctx context.Context, root *SIPQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(sgb.fns)) + for _, fn := range sgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*sgb.flds)+len(sgb.fns)) + for _, f := range *sgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) 
+ } + selector.GroupBy(selector.Columns(*sgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := sgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// SIPSelect is the builder for selecting fields of SIP entities. +type SIPSelect struct { + *SIPQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ss *SIPSelect) Aggregate(fns ...AggregateFunc) *SIPSelect { + ss.fns = append(ss.fns, fns...) + return ss +} + +// Scan applies the selector query and scans the result into the given value. +func (ss *SIPSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ss.ctx, ent.OpQuerySelect) + if err := ss.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SIPQuery, *SIPSelect](ctx, ss.SIPQuery, ss, ss.inters, v) +} + +func (ss *SIPSelect) sqlScan(ctx context.Context, root *SIPQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ss.fns)) + for _, fn := range ss.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ss.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/persistence/ent/db/sip_update.go b/internal/persistence/ent/db/sip_update.go new file mode 100644 index 000000000..5f86bf7c3 --- /dev/null +++ b/internal/persistence/ent/db/sip_update.go @@ -0,0 +1,704 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/preservationaction" + "github.com/artefactual-sdps/enduro/internal/persistence/ent/db/sip" + "github.com/google/uuid" +) + +// SIPUpdate is the builder for updating SIP entities. +type SIPUpdate struct { + config + hooks []Hook + mutation *SIPMutation +} + +// Where appends a list predicates to the SIPUpdate builder. +func (su *SIPUpdate) Where(ps ...predicate.SIP) *SIPUpdate { + su.mutation.Where(ps...) + return su +} + +// SetName sets the "name" field. +func (su *SIPUpdate) SetName(s string) *SIPUpdate { + su.mutation.SetName(s) + return su +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (su *SIPUpdate) SetNillableName(s *string) *SIPUpdate { + if s != nil { + su.SetName(*s) + } + return su +} + +// SetWorkflowID sets the "workflow_id" field. +func (su *SIPUpdate) SetWorkflowID(s string) *SIPUpdate { + su.mutation.SetWorkflowID(s) + return su +} + +// SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. +func (su *SIPUpdate) SetNillableWorkflowID(s *string) *SIPUpdate { + if s != nil { + su.SetWorkflowID(*s) + } + return su +} + +// SetRunID sets the "run_id" field. +func (su *SIPUpdate) SetRunID(u uuid.UUID) *SIPUpdate { + su.mutation.SetRunID(u) + return su +} + +// SetNillableRunID sets the "run_id" field if the given value is not nil. 
+func (su *SIPUpdate) SetNillableRunID(u *uuid.UUID) *SIPUpdate { + if u != nil { + su.SetRunID(*u) + } + return su +} + +// SetAipID sets the "aip_id" field. +func (su *SIPUpdate) SetAipID(u uuid.UUID) *SIPUpdate { + su.mutation.SetAipID(u) + return su +} + +// SetNillableAipID sets the "aip_id" field if the given value is not nil. +func (su *SIPUpdate) SetNillableAipID(u *uuid.UUID) *SIPUpdate { + if u != nil { + su.SetAipID(*u) + } + return su +} + +// ClearAipID clears the value of the "aip_id" field. +func (su *SIPUpdate) ClearAipID() *SIPUpdate { + su.mutation.ClearAipID() + return su +} + +// SetLocationID sets the "location_id" field. +func (su *SIPUpdate) SetLocationID(u uuid.UUID) *SIPUpdate { + su.mutation.SetLocationID(u) + return su +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. +func (su *SIPUpdate) SetNillableLocationID(u *uuid.UUID) *SIPUpdate { + if u != nil { + su.SetLocationID(*u) + } + return su +} + +// ClearLocationID clears the value of the "location_id" field. +func (su *SIPUpdate) ClearLocationID() *SIPUpdate { + su.mutation.ClearLocationID() + return su +} + +// SetStatus sets the "status" field. +func (su *SIPUpdate) SetStatus(i int8) *SIPUpdate { + su.mutation.ResetStatus() + su.mutation.SetStatus(i) + return su +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (su *SIPUpdate) SetNillableStatus(i *int8) *SIPUpdate { + if i != nil { + su.SetStatus(*i) + } + return su +} + +// AddStatus adds i to the "status" field. +func (su *SIPUpdate) AddStatus(i int8) *SIPUpdate { + su.mutation.AddStatus(i) + return su +} + +// SetStartedAt sets the "started_at" field. +func (su *SIPUpdate) SetStartedAt(t time.Time) *SIPUpdate { + su.mutation.SetStartedAt(t) + return su +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (su *SIPUpdate) SetNillableStartedAt(t *time.Time) *SIPUpdate { + if t != nil { + su.SetStartedAt(*t) + } + return su +} + +// ClearStartedAt clears the value of the "started_at" field. +func (su *SIPUpdate) ClearStartedAt() *SIPUpdate { + su.mutation.ClearStartedAt() + return su +} + +// SetCompletedAt sets the "completed_at" field. +func (su *SIPUpdate) SetCompletedAt(t time.Time) *SIPUpdate { + su.mutation.SetCompletedAt(t) + return su +} + +// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. +func (su *SIPUpdate) SetNillableCompletedAt(t *time.Time) *SIPUpdate { + if t != nil { + su.SetCompletedAt(*t) + } + return su +} + +// ClearCompletedAt clears the value of the "completed_at" field. +func (su *SIPUpdate) ClearCompletedAt() *SIPUpdate { + su.mutation.ClearCompletedAt() + return su +} + +// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. +func (su *SIPUpdate) AddPreservationActionIDs(ids ...int) *SIPUpdate { + su.mutation.AddPreservationActionIDs(ids...) + return su +} + +// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. +func (su *SIPUpdate) AddPreservationActions(p ...*PreservationAction) *SIPUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return su.AddPreservationActionIDs(ids...) +} + +// Mutation returns the SIPMutation object of the builder. +func (su *SIPUpdate) Mutation() *SIPMutation { + return su.mutation +} + +// ClearPreservationActions clears all "preservation_actions" edges to the PreservationAction entity. 
+func (su *SIPUpdate) ClearPreservationActions() *SIPUpdate { + su.mutation.ClearPreservationActions() + return su +} + +// RemovePreservationActionIDs removes the "preservation_actions" edge to PreservationAction entities by IDs. +func (su *SIPUpdate) RemovePreservationActionIDs(ids ...int) *SIPUpdate { + su.mutation.RemovePreservationActionIDs(ids...) + return su +} + +// RemovePreservationActions removes "preservation_actions" edges to PreservationAction entities. +func (su *SIPUpdate) RemovePreservationActions(p ...*PreservationAction) *SIPUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return su.RemovePreservationActionIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (su *SIPUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, su.sqlSave, su.mutation, su.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (su *SIPUpdate) SaveX(ctx context.Context) int { + affected, err := su.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (su *SIPUpdate) Exec(ctx context.Context) error { + _, err := su.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (su *SIPUpdate) ExecX(ctx context.Context) { + if err := su.Exec(ctx); err != nil { + panic(err) + } +} + +func (su *SIPUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + if ps := su.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := su.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + } + if value, ok := su.mutation.WorkflowID(); ok { + _spec.SetField(sip.FieldWorkflowID, field.TypeString, value) + } + if value, ok := su.mutation.RunID(); ok { + _spec.SetField(sip.FieldRunID, field.TypeUUID, value) + } + if value, ok := su.mutation.AipID(); ok { + _spec.SetField(sip.FieldAipID, field.TypeUUID, value) + } + if su.mutation.AipIDCleared() { + _spec.ClearField(sip.FieldAipID, field.TypeUUID) + } + if value, ok := su.mutation.LocationID(); ok { + _spec.SetField(sip.FieldLocationID, field.TypeUUID, value) + } + if su.mutation.LocationIDCleared() { + _spec.ClearField(sip.FieldLocationID, field.TypeUUID) + } + if value, ok := su.mutation.Status(); ok { + _spec.SetField(sip.FieldStatus, field.TypeInt8, value) + } + if value, ok := su.mutation.AddedStatus(); ok { + _spec.AddField(sip.FieldStatus, field.TypeInt8, value) + } + if value, ok := su.mutation.StartedAt(); ok { + _spec.SetField(sip.FieldStartedAt, field.TypeTime, value) + } + if su.mutation.StartedAtCleared() { + _spec.ClearField(sip.FieldStartedAt, field.TypeTime) + } + if value, ok := su.mutation.CompletedAt(); ok { + _spec.SetField(sip.FieldCompletedAt, field.TypeTime, value) + } + if su.mutation.CompletedAtCleared() { + _spec.ClearField(sip.FieldCompletedAt, field.TypeTime) + } + if su.mutation.PreservationActionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
su.mutation.RemovedPreservationActionsIDs(); len(nodes) > 0 && !su.mutation.PreservationActionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := su.mutation.PreservationActionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, su.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{sip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + su.mutation.done = true + return n, nil +} + +// SIPUpdateOne is the builder for updating a single SIP entity. +type SIPUpdateOne struct { + config + fields []string + hooks []Hook + mutation *SIPMutation +} + +// SetName sets the "name" field. +func (suo *SIPUpdateOne) SetName(s string) *SIPUpdateOne { + suo.mutation.SetName(s) + return suo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableName(s *string) *SIPUpdateOne { + if s != nil { + suo.SetName(*s) + } + return suo +} + +// SetWorkflowID sets the "workflow_id" field. +func (suo *SIPUpdateOne) SetWorkflowID(s string) *SIPUpdateOne { + suo.mutation.SetWorkflowID(s) + return suo +} + +// SetNillableWorkflowID sets the "workflow_id" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableWorkflowID(s *string) *SIPUpdateOne { + if s != nil { + suo.SetWorkflowID(*s) + } + return suo +} + +// SetRunID sets the "run_id" field. +func (suo *SIPUpdateOne) SetRunID(u uuid.UUID) *SIPUpdateOne { + suo.mutation.SetRunID(u) + return suo +} + +// SetNillableRunID sets the "run_id" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableRunID(u *uuid.UUID) *SIPUpdateOne { + if u != nil { + suo.SetRunID(*u) + } + return suo +} + +// SetAipID sets the "aip_id" field. +func (suo *SIPUpdateOne) SetAipID(u uuid.UUID) *SIPUpdateOne { + suo.mutation.SetAipID(u) + return suo +} + +// SetNillableAipID sets the "aip_id" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableAipID(u *uuid.UUID) *SIPUpdateOne { + if u != nil { + suo.SetAipID(*u) + } + return suo +} + +// ClearAipID clears the value of the "aip_id" field. +func (suo *SIPUpdateOne) ClearAipID() *SIPUpdateOne { + suo.mutation.ClearAipID() + return suo +} + +// SetLocationID sets the "location_id" field. +func (suo *SIPUpdateOne) SetLocationID(u uuid.UUID) *SIPUpdateOne { + suo.mutation.SetLocationID(u) + return suo +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. 
+func (suo *SIPUpdateOne) SetNillableLocationID(u *uuid.UUID) *SIPUpdateOne { + if u != nil { + suo.SetLocationID(*u) + } + return suo +} + +// ClearLocationID clears the value of the "location_id" field. +func (suo *SIPUpdateOne) ClearLocationID() *SIPUpdateOne { + suo.mutation.ClearLocationID() + return suo +} + +// SetStatus sets the "status" field. +func (suo *SIPUpdateOne) SetStatus(i int8) *SIPUpdateOne { + suo.mutation.ResetStatus() + suo.mutation.SetStatus(i) + return suo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableStatus(i *int8) *SIPUpdateOne { + if i != nil { + suo.SetStatus(*i) + } + return suo +} + +// AddStatus adds i to the "status" field. +func (suo *SIPUpdateOne) AddStatus(i int8) *SIPUpdateOne { + suo.mutation.AddStatus(i) + return suo +} + +// SetStartedAt sets the "started_at" field. +func (suo *SIPUpdateOne) SetStartedAt(t time.Time) *SIPUpdateOne { + suo.mutation.SetStartedAt(t) + return suo +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableStartedAt(t *time.Time) *SIPUpdateOne { + if t != nil { + suo.SetStartedAt(*t) + } + return suo +} + +// ClearStartedAt clears the value of the "started_at" field. +func (suo *SIPUpdateOne) ClearStartedAt() *SIPUpdateOne { + suo.mutation.ClearStartedAt() + return suo +} + +// SetCompletedAt sets the "completed_at" field. +func (suo *SIPUpdateOne) SetCompletedAt(t time.Time) *SIPUpdateOne { + suo.mutation.SetCompletedAt(t) + return suo +} + +// SetNillableCompletedAt sets the "completed_at" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableCompletedAt(t *time.Time) *SIPUpdateOne { + if t != nil { + suo.SetCompletedAt(*t) + } + return suo +} + +// ClearCompletedAt clears the value of the "completed_at" field. +func (suo *SIPUpdateOne) ClearCompletedAt() *SIPUpdateOne { + suo.mutation.ClearCompletedAt() + return suo +} + +// AddPreservationActionIDs adds the "preservation_actions" edge to the PreservationAction entity by IDs. +func (suo *SIPUpdateOne) AddPreservationActionIDs(ids ...int) *SIPUpdateOne { + suo.mutation.AddPreservationActionIDs(ids...) + return suo +} + +// AddPreservationActions adds the "preservation_actions" edges to the PreservationAction entity. +func (suo *SIPUpdateOne) AddPreservationActions(p ...*PreservationAction) *SIPUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return suo.AddPreservationActionIDs(ids...) +} + +// Mutation returns the SIPMutation object of the builder. +func (suo *SIPUpdateOne) Mutation() *SIPMutation { + return suo.mutation +} + +// ClearPreservationActions clears all "preservation_actions" edges to the PreservationAction entity. +func (suo *SIPUpdateOne) ClearPreservationActions() *SIPUpdateOne { + suo.mutation.ClearPreservationActions() + return suo +} + +// RemovePreservationActionIDs removes the "preservation_actions" edge to PreservationAction entities by IDs. +func (suo *SIPUpdateOne) RemovePreservationActionIDs(ids ...int) *SIPUpdateOne { + suo.mutation.RemovePreservationActionIDs(ids...) + return suo +} + +// RemovePreservationActions removes "preservation_actions" edges to PreservationAction entities. +func (suo *SIPUpdateOne) RemovePreservationActions(p ...*PreservationAction) *SIPUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return suo.RemovePreservationActionIDs(ids...) 
+} + +// Where appends a list predicates to the SIPUpdate builder. +func (suo *SIPUpdateOne) Where(ps ...predicate.SIP) *SIPUpdateOne { + suo.mutation.Where(ps...) + return suo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (suo *SIPUpdateOne) Select(field string, fields ...string) *SIPUpdateOne { + suo.fields = append([]string{field}, fields...) + return suo +} + +// Save executes the query and returns the updated SIP entity. +func (suo *SIPUpdateOne) Save(ctx context.Context) (*SIP, error) { + return withHooks(ctx, suo.sqlSave, suo.mutation, suo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (suo *SIPUpdateOne) SaveX(ctx context.Context) *SIP { + node, err := suo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (suo *SIPUpdateOne) Exec(ctx context.Context) error { + _, err := suo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (suo *SIPUpdateOne) ExecX(ctx context.Context) { + if err := suo.Exec(ctx); err != nil { + panic(err) + } +} + +func (suo *SIPUpdateOne) sqlSave(ctx context.Context) (_node *SIP, err error) { + _spec := sqlgraph.NewUpdateSpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + id, ok := suo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "SIP.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := suo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, sip.FieldID) + for _, f := range fields { + if !sip.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != sip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := suo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := suo.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + } + if value, ok := suo.mutation.WorkflowID(); ok { + _spec.SetField(sip.FieldWorkflowID, field.TypeString, value) + } + if value, ok := suo.mutation.RunID(); ok { + _spec.SetField(sip.FieldRunID, field.TypeUUID, value) + } + if value, ok := suo.mutation.AipID(); ok { + _spec.SetField(sip.FieldAipID, field.TypeUUID, value) + } + if suo.mutation.AipIDCleared() { + _spec.ClearField(sip.FieldAipID, field.TypeUUID) + } + if value, ok := suo.mutation.LocationID(); ok { + _spec.SetField(sip.FieldLocationID, field.TypeUUID, value) + } + if suo.mutation.LocationIDCleared() { + _spec.ClearField(sip.FieldLocationID, field.TypeUUID) + } + if value, ok := suo.mutation.Status(); ok { + _spec.SetField(sip.FieldStatus, field.TypeInt8, value) + } + if value, ok := suo.mutation.AddedStatus(); ok { + _spec.AddField(sip.FieldStatus, field.TypeInt8, value) + } + if value, ok := suo.mutation.StartedAt(); ok { + _spec.SetField(sip.FieldStartedAt, field.TypeTime, value) + } + if suo.mutation.StartedAtCleared() { + _spec.ClearField(sip.FieldStartedAt, field.TypeTime) + } + if value, ok := suo.mutation.CompletedAt(); ok { + _spec.SetField(sip.FieldCompletedAt, field.TypeTime, value) + } + if suo.mutation.CompletedAtCleared() { + _spec.ClearField(sip.FieldCompletedAt, field.TypeTime) + } + if suo.mutation.PreservationActionsCleared() { + edge 
:= &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := suo.mutation.RemovedPreservationActionsIDs(); len(nodes) > 0 && !suo.mutation.PreservationActionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := suo.mutation.PreservationActionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: sip.PreservationActionsTable, + Columns: []string{sip.PreservationActionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(preservationaction.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &SIP{config: suo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, suo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{sip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + suo.mutation.done = true + return _node, nil +} diff --git a/internal/persistence/ent/db/tx.go b/internal/persistence/ent/db/tx.go index e2871a482..728df6002 100644 --- a/internal/persistence/ent/db/tx.go +++ b/internal/persistence/ent/db/tx.go @@ -12,12 +12,12 @@ import ( // Tx is a transactional client that is created by calling Client.Tx(). type Tx struct { config - // Pkg is the client for interacting with the Pkg builders. - Pkg *PkgClient // PreservationAction is the client for interacting with the PreservationAction builders. PreservationAction *PreservationActionClient // PreservationTask is the client for interacting with the PreservationTask builders. PreservationTask *PreservationTaskClient + // SIP is the client for interacting with the SIP builders. + SIP *SIPClient // lazily loaded. client *Client @@ -149,9 +149,9 @@ func (tx *Tx) Client() *Client { } func (tx *Tx) init() { - tx.Pkg = NewPkgClient(tx.config) tx.PreservationAction = NewPreservationActionClient(tx.config) tx.PreservationTask = NewPreservationTaskClient(tx.config) + tx.SIP = NewSIPClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. @@ -161,7 +161,7 @@ func (tx *Tx) init() { // of them in order to commit or rollback the transaction. // // If a closed transaction is embedded in one of the generated entities, and the entity -// applies a query, for example: Pkg.QueryXXX(), the query will be executed +// applies a query, for example: PreservationAction.QueryXXX(), the query will be executed // through the driver which created this transaction. // // Note that txDriver is not goroutine safe. 
diff --git a/internal/persistence/ent/schema/preservation_action.go b/internal/persistence/ent/schema/preservation_action.go index 6b1c91631..f56e1265e 100644 --- a/internal/persistence/ent/schema/preservation_action.go +++ b/internal/persistence/ent/schema/preservation_action.go @@ -33,7 +33,7 @@ func (PreservationAction) Fields() []ent.Field { Optional(), field.Time("completed_at"). Optional(), - field.Int("package_id"). + field.Int("sip_id"). Positive(), } } @@ -41,11 +41,11 @@ func (PreservationAction) Fields() []ent.Field { // Edges of the PreservationAction. func (PreservationAction) Edges() []ent.Edge { return []ent.Edge{ - edge.From("package", Pkg.Type). + edge.From("sip", SIP.Type). Ref("preservation_actions"). Unique(). Required(). - Field("package_id"), + Field("sip_id"), edge.To("tasks", PreservationTask.Type). Annotations(entsql.OnDelete(entsql.Cascade)), } diff --git a/internal/persistence/ent/schema/pkg.go b/internal/persistence/ent/schema/sip.go similarity index 63% rename from internal/persistence/ent/schema/pkg.go rename to internal/persistence/ent/schema/sip.go index 57d4080d2..2705c4fc6 100644 --- a/internal/persistence/ent/schema/pkg.go +++ b/internal/persistence/ent/schema/sip.go @@ -12,20 +12,20 @@ import ( "github.com/google/uuid" ) -// Pkg holds the schema definition for the Pkg entity. -type Pkg struct { +// SIP holds the schema definition for the SIP entity. +type SIP struct { ent.Schema } -// Annotations of the Pkg. -func (Pkg) Annotations() []schema.Annotation { +// Annotations of the SIP. +func (SIP) Annotations() []schema.Annotation { return []schema.Annotation{ - entsql.Annotation{Table: "package"}, + entsql.Annotation{Table: "sip"}, } } -// Fields of the Pkg. -func (Pkg) Fields() []ent.Field { +// Fields of the SIP. +func (SIP) Fields() []ent.Field { return []ent.Field{ field.String("name"). Annotations(entsql.Annotation{ @@ -35,8 +35,7 @@ func (Pkg) Fields() []ent.Field { Annotations(entsql.Annotation{ Size: 255, }), - field.UUID("run_id", uuid.UUID{}). - Unique(), + field.UUID("run_id", uuid.UUID{}), field.UUID("aip_id", uuid.UUID{}). Optional(), field.UUID("location_id", uuid.UUID{}). @@ -52,29 +51,29 @@ func (Pkg) Fields() []ent.Field { } } -// Edges of the Pkg. -func (Pkg) Edges() []ent.Edge { +// Edges of the SIP. +func (SIP) Edges() []ent.Edge { return []ent.Edge{ edge.To("preservation_actions", PreservationAction.Type). Annotations(entsql.OnDelete(entsql.Cascade)), } } -// Indexes of the Pkg. -func (Pkg) Indexes() []ent.Index { +// Indexes of the SIP. +func (SIP) Indexes() []ent.Index { return []ent.Index{ index.Fields("name"). - StorageKey("package_name_idx"). + StorageKey("sip_name_idx"). Annotations(entsql.Prefix(50)), index.Fields("aip_id"). - StorageKey("package_aip_id_idx"), + StorageKey("sip_aip_id_idx"), index.Fields("location_id"). - StorageKey("package_location_id_idx"), + StorageKey("sip_location_id_idx"), index.Fields("status"). - StorageKey("package_status_idx"), + StorageKey("sip_status_idx"), index.Fields("created_at"). - StorageKey("package_created_at_idx"), + StorageKey("sip_created_at_idx"), index.Fields("started_at"). 
- StorageKey("package_started_at_idx"), + StorageKey("sip_started_at_idx"), } } diff --git a/internal/persistence/fake/mock_persistence.go b/internal/persistence/fake/mock_persistence.go index cc027a9b3..84fe7e7a3 100644 --- a/internal/persistence/fake/mock_persistence.go +++ b/internal/persistence/fake/mock_persistence.go @@ -41,44 +41,6 @@ func (m *MockService) EXPECT() *MockServiceMockRecorder { return m.recorder } -// CreatePackage mocks base method. -func (m *MockService) CreatePackage(arg0 context.Context, arg1 *datatypes.Package) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreatePackage", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreatePackage indicates an expected call of CreatePackage. -func (mr *MockServiceMockRecorder) CreatePackage(arg0, arg1 any) *MockServiceCreatePackageCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePackage", reflect.TypeOf((*MockService)(nil).CreatePackage), arg0, arg1) - return &MockServiceCreatePackageCall{Call: call} -} - -// MockServiceCreatePackageCall wrap *gomock.Call -type MockServiceCreatePackageCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockServiceCreatePackageCall) Return(arg0 error) *MockServiceCreatePackageCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockServiceCreatePackageCall) Do(f func(context.Context, *datatypes.Package) error) *MockServiceCreatePackageCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceCreatePackageCall) DoAndReturn(f func(context.Context, *datatypes.Package) error) *MockServiceCreatePackageCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // CreatePreservationAction mocks base method. func (m *MockService) CreatePreservationAction(arg0 context.Context, arg1 *datatypes.PreservationAction) error { m.ctrl.T.Helper() @@ -155,81 +117,80 @@ func (c *MockServiceCreatePreservationTaskCall) DoAndReturn(f func(context.Conte return c } -// ListPackages mocks base method. -func (m *MockService) ListPackages(arg0 context.Context, arg1 *persistence.PackageFilter) ([]*datatypes.Package, *persistence.Page, error) { +// CreateSIP mocks base method. +func (m *MockService) CreateSIP(arg0 context.Context, arg1 *datatypes.SIP) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPackages", arg0, arg1) - ret0, _ := ret[0].([]*datatypes.Package) - ret1, _ := ret[1].(*persistence.Page) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret := m.ctrl.Call(m, "CreateSIP", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } -// ListPackages indicates an expected call of ListPackages. -func (mr *MockServiceMockRecorder) ListPackages(arg0, arg1 any) *MockServiceListPackagesCall { +// CreateSIP indicates an expected call of CreateSIP. 
+func (mr *MockServiceMockRecorder) CreateSIP(arg0, arg1 any) *MockServiceCreateSIPCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPackages", reflect.TypeOf((*MockService)(nil).ListPackages), arg0, arg1) - return &MockServiceListPackagesCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSIP", reflect.TypeOf((*MockService)(nil).CreateSIP), arg0, arg1) + return &MockServiceCreateSIPCall{Call: call} } -// MockServiceListPackagesCall wrap *gomock.Call -type MockServiceListPackagesCall struct { +// MockServiceCreateSIPCall wrap *gomock.Call +type MockServiceCreateSIPCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockServiceListPackagesCall) Return(arg0 []*datatypes.Package, arg1 *persistence.Page, arg2 error) *MockServiceListPackagesCall { - c.Call = c.Call.Return(arg0, arg1, arg2) +func (c *MockServiceCreateSIPCall) Return(arg0 error) *MockServiceCreateSIPCall { + c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockServiceListPackagesCall) Do(f func(context.Context, *persistence.PackageFilter) ([]*datatypes.Package, *persistence.Page, error)) *MockServiceListPackagesCall { +func (c *MockServiceCreateSIPCall) Do(f func(context.Context, *datatypes.SIP) error) *MockServiceCreateSIPCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceListPackagesCall) DoAndReturn(f func(context.Context, *persistence.PackageFilter) ([]*datatypes.Package, *persistence.Page, error)) *MockServiceListPackagesCall { +func (c *MockServiceCreateSIPCall) DoAndReturn(f func(context.Context, *datatypes.SIP) error) *MockServiceCreateSIPCall { c.Call = c.Call.DoAndReturn(f) return c } -// UpdatePackage mocks base method. -func (m *MockService) UpdatePackage(arg0 context.Context, arg1 int, arg2 persistence.PackageUpdater) (*datatypes.Package, error) { +// ListSIPs mocks base method. +func (m *MockService) ListSIPs(arg0 context.Context, arg1 *persistence.SIPFilter) ([]*datatypes.SIP, *persistence.Page, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdatePackage", arg0, arg1, arg2) - ret0, _ := ret[0].(*datatypes.Package) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ListSIPs", arg0, arg1) + ret0, _ := ret[0].([]*datatypes.SIP) + ret1, _ := ret[1].(*persistence.Page) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// UpdatePackage indicates an expected call of UpdatePackage. -func (mr *MockServiceMockRecorder) UpdatePackage(arg0, arg1, arg2 any) *MockServiceUpdatePackageCall { +// ListSIPs indicates an expected call of ListSIPs. 
+func (mr *MockServiceMockRecorder) ListSIPs(arg0, arg1 any) *MockServiceListSIPsCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePackage", reflect.TypeOf((*MockService)(nil).UpdatePackage), arg0, arg1, arg2) - return &MockServiceUpdatePackageCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSIPs", reflect.TypeOf((*MockService)(nil).ListSIPs), arg0, arg1) + return &MockServiceListSIPsCall{Call: call} } -// MockServiceUpdatePackageCall wrap *gomock.Call -type MockServiceUpdatePackageCall struct { +// MockServiceListSIPsCall wrap *gomock.Call +type MockServiceListSIPsCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockServiceUpdatePackageCall) Return(arg0 *datatypes.Package, arg1 error) *MockServiceUpdatePackageCall { - c.Call = c.Call.Return(arg0, arg1) +func (c *MockServiceListSIPsCall) Return(arg0 []*datatypes.SIP, arg1 *persistence.Page, arg2 error) *MockServiceListSIPsCall { + c.Call = c.Call.Return(arg0, arg1, arg2) return c } // Do rewrite *gomock.Call.Do -func (c *MockServiceUpdatePackageCall) Do(f func(context.Context, int, persistence.PackageUpdater) (*datatypes.Package, error)) *MockServiceUpdatePackageCall { +func (c *MockServiceListSIPsCall) Do(f func(context.Context, *persistence.SIPFilter) ([]*datatypes.SIP, *persistence.Page, error)) *MockServiceListSIPsCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceUpdatePackageCall) DoAndReturn(f func(context.Context, int, persistence.PackageUpdater) (*datatypes.Package, error)) *MockServiceUpdatePackageCall { +func (c *MockServiceListSIPsCall) DoAndReturn(f func(context.Context, *persistence.SIPFilter) ([]*datatypes.SIP, *persistence.Page, error)) *MockServiceListSIPsCall { c.Call = c.Call.DoAndReturn(f) return c } @@ -272,3 +233,42 @@ func (c *MockServiceUpdatePreservationTaskCall) DoAndReturn(f func(context.Conte c.Call = c.Call.DoAndReturn(f) return c } + +// UpdateSIP mocks base method. +func (m *MockService) UpdateSIP(arg0 context.Context, arg1 int, arg2 persistence.SIPUpdater) (*datatypes.SIP, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSIP", arg0, arg1, arg2) + ret0, _ := ret[0].(*datatypes.SIP) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSIP indicates an expected call of UpdateSIP. 
+func (mr *MockServiceMockRecorder) UpdateSIP(arg0, arg1, arg2 any) *MockServiceUpdateSIPCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSIP", reflect.TypeOf((*MockService)(nil).UpdateSIP), arg0, arg1, arg2) + return &MockServiceUpdateSIPCall{Call: call} +} + +// MockServiceUpdateSIPCall wrap *gomock.Call +type MockServiceUpdateSIPCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockServiceUpdateSIPCall) Return(arg0 *datatypes.SIP, arg1 error) *MockServiceUpdateSIPCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockServiceUpdateSIPCall) Do(f func(context.Context, int, persistence.SIPUpdater) (*datatypes.SIP, error)) *MockServiceUpdateSIPCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockServiceUpdateSIPCall) DoAndReturn(f func(context.Context, int, persistence.SIPUpdater) (*datatypes.SIP, error)) *MockServiceUpdateSIPCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/internal/persistence/filter.go b/internal/persistence/filter.go index d815c1886..00b5a1d45 100644 --- a/internal/persistence/filter.go +++ b/internal/persistence/filter.go @@ -61,13 +61,13 @@ func (p *Page) Goa() *goapackage.EnduroPage { } } -type PackageFilter struct { +type SIPFilter struct { // Name filters for packages whose names contain the given string. Name *string AIPID *uuid.UUID LocationID *uuid.UUID - Status *enums.PackageStatus + Status *enums.SIPStatus CreatedAt *timerange.Range Sort diff --git a/internal/persistence/persistence.go b/internal/persistence/persistence.go index 5575bc5f1..eb98c8199 100644 --- a/internal/persistence/persistence.go +++ b/internal/persistence/persistence.go @@ -19,17 +19,17 @@ var ( ) type ( - PackageUpdater func(*datatypes.Package) (*datatypes.Package, error) + SIPUpdater func(*datatypes.SIP) (*datatypes.SIP, error) PresTaskUpdater func(*datatypes.PreservationTask) (*datatypes.PreservationTask, error) ) type Service interface { - // CreatePackage persists the given Package to the data store then updates - // the Package from the data store, adding auto-generated data + // CreateSIP persists the given SIP to the data store then updates + // the SIP from the data store, adding auto-generated data // (e.g. ID, CreatedAt). 
- CreatePackage(context.Context, *datatypes.Package) error - UpdatePackage(context.Context, int, PackageUpdater) (*datatypes.Package, error) - ListPackages(context.Context, *PackageFilter) ([]*datatypes.Package, *Page, error) + CreateSIP(context.Context, *datatypes.SIP) error + UpdateSIP(context.Context, int, SIPUpdater) (*datatypes.SIP, error) + ListSIPs(context.Context, *SIPFilter) ([]*datatypes.SIP, *Page, error) CreatePreservationAction(context.Context, *datatypes.PreservationAction) error diff --git a/internal/persistence/telemetry.go b/internal/persistence/telemetry.go index 1b2a09d61..98f2992cb 100644 --- a/internal/persistence/telemetry.go +++ b/internal/persistence/telemetry.go @@ -31,41 +31,41 @@ func updateError(err error, name string) error { return fmt.Errorf("%s: %w", name, err) } -func (w *wrapper) CreatePackage(ctx context.Context, p *datatypes.Package) error { - ctx, span := w.tracer.Start(ctx, "CreatePackage") +func (w *wrapper) CreateSIP(ctx context.Context, p *datatypes.SIP) error { + ctx, span := w.tracer.Start(ctx, "CreateSIP") defer span.End() - err := w.wrapped.CreatePackage(ctx, p) + err := w.wrapped.CreateSIP(ctx, p) if err != nil { telemetry.RecordError(span, err) - return updateError(err, "CreatePackage") + return updateError(err, "CreateSIP") } return nil } -func (w *wrapper) UpdatePackage(ctx context.Context, id int, updater PackageUpdater) (*datatypes.Package, error) { - ctx, span := w.tracer.Start(ctx, "UpdatePackage") +func (w *wrapper) UpdateSIP(ctx context.Context, id int, updater SIPUpdater) (*datatypes.SIP, error) { + ctx, span := w.tracer.Start(ctx, "UpdateSIP") defer span.End() span.SetAttributes(attribute.Int("id", id)) - r, err := w.wrapped.UpdatePackage(ctx, id, updater) + r, err := w.wrapped.UpdateSIP(ctx, id, updater) if err != nil { telemetry.RecordError(span, err) - return nil, updateError(err, "UpdatePackage") + return nil, updateError(err, "UpdateSIP") } return r, nil } -func (w *wrapper) ListPackages(ctx context.Context, f *PackageFilter) ([]*datatypes.Package, *Page, error) { - ctx, span := w.tracer.Start(ctx, "ListPackages") +func (w *wrapper) ListSIPs(ctx context.Context, f *SIPFilter) ([]*datatypes.SIP, *Page, error) { + ctx, span := w.tracer.Start(ctx, "ListSIPs") defer span.End() - r, pg, err := w.wrapped.ListPackages(ctx, f) + r, pg, err := w.wrapped.ListSIPs(ctx, f) if err != nil { telemetry.RecordError(span, err) - return nil, nil, updateError(err, "ListPackages") + return nil, nil, updateError(err, "ListSIPs") } return r, pg, nil diff --git a/internal/storage/fake/mock_storage.go b/internal/storage/fake/mock_storage.go index e91c98a16..add24c7a4 100644 --- a/internal/storage/fake/mock_storage.go +++ b/internal/storage/fake/mock_storage.go @@ -703,7 +703,7 @@ func (c *MockServiceUpdatePackageLocationIDCall) DoAndReturn(f func(context.Cont } // UpdatePackageStatus mocks base method. 
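The interface rename above touches every call site, so here is an illustrative sketch (not taken from the diff) of how a caller might drive the renamed methods. The svc and ctx arguments, the example function, the literal values, and the Name field on datatypes.SIP are assumptions; the UpdateSIP/ListSIPs signatures and the SIPFilter fields are the ones introduced above, and the import paths are inferred from this repository's layout.

package example

import (
	"context"

	"github.com/artefactual-sdps/enduro/internal/datatypes"
	"github.com/artefactual-sdps/enduro/internal/persistence"
)

func example(ctx context.Context, svc persistence.Service) error {
	// UpdateSIP applies the SIPUpdater callback to SIP 1 and persists whatever
	// the callback returns.
	_, err := svc.UpdateSIP(ctx, 1, func(s *datatypes.SIP) (*datatypes.SIP, error) {
		s.Name = "renamed-transfer.zip" // assumed field, shown for illustration
		return s, nil
	})
	if err != nil {
		return err
	}

	// ListSIPs takes the renamed SIPFilter; here it matches SIPs whose name
	// contains "transfer".
	name := "transfer"
	sips, _, err := svc.ListSIPs(ctx, &persistence.SIPFilter{Name: &name})
	if err != nil {
		return err
	}
	_ = sips // e.g. hand the page of results to the API layer
	return nil
}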
-func (m *MockService) UpdatePackageStatus(arg0 context.Context, arg1 uuid.UUID, arg2 types.PackageStatus) error { +func (m *MockService) UpdatePackageStatus(arg0 context.Context, arg1 uuid.UUID, arg2 types.AIPStatus) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdatePackageStatus", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -729,13 +729,13 @@ func (c *MockServiceUpdatePackageStatusCall) Return(arg0 error) *MockServiceUpda } // Do rewrite *gomock.Call.Do -func (c *MockServiceUpdatePackageStatusCall) Do(f func(context.Context, uuid.UUID, types.PackageStatus) error) *MockServiceUpdatePackageStatusCall { +func (c *MockServiceUpdatePackageStatusCall) Do(f func(context.Context, uuid.UUID, types.AIPStatus) error) *MockServiceUpdatePackageStatusCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceUpdatePackageStatusCall) DoAndReturn(f func(context.Context, uuid.UUID, types.PackageStatus) error) *MockServiceUpdatePackageStatusCall { +func (c *MockServiceUpdatePackageStatusCall) DoAndReturn(f func(context.Context, uuid.UUID, types.AIPStatus) error) *MockServiceUpdatePackageStatusCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/internal/storage/local_activities.go b/internal/storage/local_activities.go index e91981df1..98d08eb49 100644 --- a/internal/storage/local_activities.go +++ b/internal/storage/local_activities.go @@ -23,7 +23,7 @@ func UpdatePackageLocationLocalActivity( type UpdatePackageStatusLocalActivityParams struct { AIPID uuid.UUID - Status types.PackageStatus + Status types.AIPStatus } func UpdatePackageStatusLocalActivity( diff --git a/internal/storage/persistence/ent/client/client.go b/internal/storage/persistence/ent/client/client.go index 36d82e272..c21e2603a 100644 --- a/internal/storage/persistence/ent/client/client.go +++ b/internal/storage/persistence/ent/client/client.go @@ -11,8 +11,8 @@ import ( goastorage "github.com/artefactual-sdps/enduro/internal/api/gen/storage" "github.com/artefactual-sdps/enduro/internal/storage/persistence" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/types" ) @@ -28,13 +28,13 @@ func NewClient(c *db.Client) *Client { return &Client{c: c} } -func (c *Client) CreatePackage(ctx context.Context, goapkg *goastorage.Package) (*goastorage.Package, error) { - q := c.c.Pkg.Create() +func (c *Client) CreateAIP(ctx context.Context, goapkg *goastorage.Package) (*goastorage.Package, error) { + q := c.c.AIP.Create() q.SetName(goapkg.Name) q.SetAipID(goapkg.AipID) q.SetObjectKey(goapkg.ObjectKey) - q.SetStatus(types.NewPackageStatus(goapkg.Status)) + q.SetStatus(types.NewAIPStatus(goapkg.Status)) if goapkg.LocationID != nil { id, err := c.c.Location.Query(). 
@@ -52,46 +52,46 @@ func (c *Client) CreatePackage(ctx context.Context, goapkg *goastorage.Package) q.SetLocationID(id) } - pkg, err := q.Save(ctx) + a, err := q.Save(ctx) if err != nil { return nil, err } - return pkgAsGoa(ctx, pkg), nil + return aipAsGoa(ctx, a), nil } -func (c *Client) ListPackages(ctx context.Context) (goastorage.PackageCollection, error) { +func (c *Client) ListAIPs(ctx context.Context) (goastorage.PackageCollection, error) { pkgs := []*goastorage.Package{} - res, err := c.c.Pkg.Query().All(ctx) + res, err := c.c.AIP.Query().All(ctx) for _, item := range res { - pkgs = append(pkgs, pkgAsGoa(ctx, item)) + pkgs = append(pkgs, aipAsGoa(ctx, item)) } return pkgs, err } -func (c *Client) ReadPackage(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) { - pkg, err := c.c.Pkg.Query(). +func (c *Client) ReadAIP(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) { + a, err := c.c.AIP.Query(). Where( - pkg.AipID(aipID), + aip.AipID(aipID), ). Only(ctx) if err != nil { if db.IsNotFound(err) { - return nil, &goastorage.PackageNotFound{AipID: aipID, Message: "package not found"} + return nil, &goastorage.PackageNotFound{AipID: aipID, Message: "AIP not found"} } else { return nil, goastorage.MakeNotAvailable(errors.New("cannot perform operation")) } } - return pkgAsGoa(ctx, pkg), nil + return aipAsGoa(ctx, a), nil } -func (c *Client) UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.PackageStatus) error { - n, err := c.c.Pkg.Update(). +func (c *Client) UpdateAIPStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error { + n, err := c.c.AIP.Update(). Where( - pkg.AipID(aipID), + aip.AipID(aipID), ). SetStatus(status). Save(ctx) @@ -106,7 +106,7 @@ func (c *Client) UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, statu return nil } -func (c *Client) UpdatePackageLocationID(ctx context.Context, aipID, locationID uuid.UUID) error { +func (c *Client) UpdateAIPLocationID(ctx context.Context, aipID, locationID uuid.UUID) error { l, err := c.c.Location.Query(). Where( location.UUID(locationID), @@ -116,9 +116,9 @@ func (c *Client) UpdatePackageLocationID(ctx context.Context, aipID, locationID return err } - n, err := c.c.Pkg.Update(). + n, err := c.c.AIP.Update(). Where( - pkg.AipID(aipID), + aip.AipID(aipID), ). SetLocation(l). Save(ctx) @@ -133,17 +133,17 @@ func (c *Client) UpdatePackageLocationID(ctx context.Context, aipID, locationID return nil } -func pkgAsGoa(ctx context.Context, pkg *db.Pkg) *goastorage.Package { +func aipAsGoa(ctx context.Context, a *db.AIP) *goastorage.Package { p := &goastorage.Package{ - Name: pkg.Name, - AipID: pkg.AipID, - Status: pkg.Status.String(), - ObjectKey: pkg.ObjectKey, - CreatedAt: pkg.CreatedAt.Format(time.RFC3339), + Name: a.Name, + AipID: a.AipID, + Status: a.Status.String(), + ObjectKey: a.ObjectKey, + CreatedAt: a.CreatedAt.Format(time.RFC3339), } // TODO: should we use UUID as the foreign key? 
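For reviewers, an illustrative sketch (not taken from the diff) of how the renamed storage client methods above fit together: create an AIP, mark it stored, read it back. The entc argument (an open ent *db.Client), the example function, and the literal values are assumptions; the method names, the goastorage.Package fields, and types.AIPStatusStored all appear in this diff.

package example

import (
	"context"

	"github.com/google/uuid"

	goastorage "github.com/artefactual-sdps/enduro/internal/api/gen/storage"
	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/client"
	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/storage/types"
)

func example(ctx context.Context, entc *db.Client) error {
	c := client.NewClient(entc)
	aipID := uuid.New()

	// Create the AIP record; Status uses the goa string form.
	if _, err := c.CreateAIP(ctx, &goastorage.Package{
		Name:      "example-aip",
		AipID:     aipID,
		ObjectKey: uuid.New(),
		Status:    "in_review",
	}); err != nil {
		return err
	}

	// Flip the status once the object is in place.
	if err := c.UpdateAIPStatus(ctx, aipID, types.AIPStatusStored); err != nil {
		return err
	}

	// Read it back in its goa representation.
	_, err := c.ReadAIP(ctx, aipID)
	return err
}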
- l, err := pkg.QueryLocation().Only(ctx) + l, err := a.QueryLocation().Only(ctx) if err == nil { p.LocationID = &l.UUID } @@ -247,15 +247,15 @@ func locationAsGoa(loc *db.Location) *goastorage.Location { return l } -func (c *Client) LocationPackages(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) { - res, err := c.c.Location.Query().Where(location.UUID(locationID)).QueryPackages().All(ctx) +func (c *Client) LocationAIPs(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) { + res, err := c.c.Location.Query().Where(location.UUID(locationID)).QueryAips().All(ctx) if err != nil { return nil, err } packages := []*goastorage.Package{} for _, item := range res { - packages = append(packages, pkgAsGoa(ctx, item)) + packages = append(packages, aipAsGoa(ctx, item)) } return packages, nil diff --git a/internal/storage/persistence/ent/client/client_test.go b/internal/storage/persistence/ent/client/client_test.go index c300e8973..c562d67b2 100644 --- a/internal/storage/persistence/ent/client/client_test.go +++ b/internal/storage/persistence/ent/client/client_test.go @@ -15,10 +15,10 @@ import ( goastorage "github.com/artefactual-sdps/enduro/internal/api/gen/storage" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/client" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/enttest" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/hook" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/types" ) @@ -44,8 +44,8 @@ func setUpClient(t *testing.T) (*db.Client, *client.Client) { c := client.NewClient(entc) // Use ent Hooks to set the create_at fields to a fixed value - entc.Pkg.Use(func(next ent.Mutator) ent.Mutator { - return hook.PkgFunc(func(ctx context.Context, m *db.PkgMutation) (ent.Value, error) { + entc.AIP.Use(func(next ent.Mutator) ent.Mutator { + return hook.AIPFunc(func(ctx context.Context, m *db.AIPMutation) (ent.Value, error) { if m.Op() == db.OpCreate { m.SetCreatedAt(fakeNow()) } @@ -64,7 +64,7 @@ func setUpClient(t *testing.T) (*db.Client, *client.Client) { return entc, c } -func TestCreatePackage(t *testing.T) { +func TestCreateAIP(t *testing.T) { t.Parallel() type test struct { @@ -76,14 +76,14 @@ func TestCreatePackage(t *testing.T) { for _, tt := range []test{ { - name: "Creates a package with minimal data", + name: "Creates an AIP with minimal data", params: &goastorage.Package{ - Name: "test_package", + Name: "test_aip", AipID: aipID, ObjectKey: objectKey, }, want: &goastorage.Package{ - Name: "test_package", + Name: "test_aip", AipID: aipID, ObjectKey: objectKey, Status: "unspecified", @@ -91,16 +91,16 @@ func TestCreatePackage(t *testing.T) { }, }, { - name: "Creates a package with all data", + name: "Creates an AIP with all data", params: &goastorage.Package{ - Name: "test_package", + Name: "test_aip", AipID: aipID, ObjectKey: objectKey, Status: "stored", LocationID: ref.New(locationID), }, want: &goastorage.Package{ - Name: "test_package", + Name: "test_aip", AipID: aipID, ObjectKey: objectKey, Status: "stored", @@ -111,7 +111,7 @@ func TestCreatePackage(t *testing.T) { { name: "Errors if locationID is not found", params: &goastorage.Package{ - 
Name: "test_package", + Name: "test_aip", AipID: aipID, ObjectKey: objectKey, LocationID: ref.New(uuid.MustParse("f1508f95-cab7-447f-b6a2-e01bf7c64558")), @@ -141,7 +141,7 @@ func TestCreatePackage(t *testing.T) { t.Fatalf("Couldn't create test location: %v", err) } - got, err := c.CreatePackage(ctx, tt.params) + got, err := c.CreateAIP(ctx, tt.params) if tt.wantErr != "" { assert.Error(t, err, tt.wantErr) return @@ -153,7 +153,7 @@ func TestCreatePackage(t *testing.T) { } } -func TestListPackages(t *testing.T) { +func TestListAIPs(t *testing.T) { t.Parallel() aipID2 := uuid.MustParse("96e182a0-31ab-4738-a620-1ff1954d9ecb") @@ -161,24 +161,24 @@ func TestListPackages(t *testing.T) { entc, c := setUpClient(t) - entc.Pkg.Create(). - SetName("Package"). + entc.AIP.Create(). + SetName("AIP"). SetAipID(aipID). SetObjectKey(objectKey). - SetStatus(types.StatusStored). + SetStatus(types.AIPStatusStored). SaveX(context.Background()) - entc.Pkg.Create(). - SetName("Another Package"). + entc.AIP.Create(). + SetName("Another AIP"). SetAipID(aipID2). SetObjectKey(objectKey2). - SetStatus(types.StatusRejected). + SetStatus(types.AIPStatusRejected). SaveX(context.Background()) - pkgs, err := c.ListPackages(context.Background()) + aips, err := c.ListAIPs(context.Background()) assert.NilError(t, err) - assert.DeepEqual(t, pkgs, goastorage.PackageCollection{ + assert.DeepEqual(t, aips, goastorage.PackageCollection{ { - Name: "Package", + Name: "AIP", AipID: aipID, Status: "stored", ObjectKey: objectKey, @@ -186,7 +186,7 @@ func TestListPackages(t *testing.T) { CreatedAt: "2013-02-03T19:54:00Z", }, { - Name: "Another Package", + Name: "Another AIP", AipID: aipID2, Status: "rejected", ObjectKey: objectKey2, @@ -196,23 +196,23 @@ func TestListPackages(t *testing.T) { }) } -func TestReadPackage(t *testing.T) { +func TestReadAIP(t *testing.T) { t.Parallel() t.Run("Returns valid result", func(t *testing.T) { entc, c := setUpClient(t) - entc.Pkg.Create(). - SetName("Package"). + entc.AIP.Create(). + SetName("AIP"). SetAipID(aipID). SetObjectKey(objectKey). - SetStatus(types.StatusStored). + SetStatus(types.AIPStatusStored). SaveX(context.Background()) - pkg, err := c.ReadPackage(context.Background(), aipID) + aip, err := c.ReadAIP(context.Background(), aipID) assert.NilError(t, err) - assert.DeepEqual(t, pkg, &goastorage.Package{ - Name: "Package", + assert.DeepEqual(t, aip, &goastorage.Package{ + Name: "AIP", AipID: aipID, Status: "stored", ObjectKey: objectKey, @@ -221,40 +221,40 @@ func TestReadPackage(t *testing.T) { }) }) - t.Run("Returns error when package does not exist", func(t *testing.T) { + t.Run("Returns error when AIP does not exist", func(t *testing.T) { t.Parallel() _, c := setUpClient(t) - l, err := c.ReadPackage(context.Background(), aipID) + l, err := c.ReadAIP(context.Background(), aipID) assert.Assert(t, l == nil) - assert.ErrorContains(t, err, "package not found") + assert.ErrorContains(t, err, "Storage package not found") }) } -func TestUpdatePackageStatus(t *testing.T) { +func TestUpdateAIPStatus(t *testing.T) { t.Parallel() entc, c := setUpClient(t) - p := entc.Pkg.Create(). - SetName("Package"). + a := entc.AIP.Create(). + SetName("AIP"). SetAipID(aipID). SetObjectKey(objectKey). - SetStatus(types.StatusStored). + SetStatus(types.AIPStatusStored). SaveX(context.Background()) - err := c.UpdatePackageStatus(context.Background(), p.AipID, types.StatusRejected) + err := c.UpdateAIPStatus(context.Background(), a.AipID, types.AIPStatusRejected) assert.NilError(t, err) - entc.Pkg.Query(). 
+ entc.AIP.Query(). Where( - pkg.ID(p.ID), - pkg.StatusEQ(types.StatusRejected), + aip.ID(a.ID), + aip.StatusEQ(types.AIPStatusRejected), ).OnlyX(context.Background()) } -func TestUpdatePackageLocation(t *testing.T) { +func TestUpdateAIPLocation(t *testing.T) { t.Parallel() entc, c := setUpClient(t) @@ -285,21 +285,21 @@ func TestUpdatePackageLocation(t *testing.T) { }). SaveX(context.Background()) - p := entc.Pkg.Create(). - SetName("Package"). + a := entc.AIP.Create(). + SetName("AIP"). SetAipID(aipID). SetObjectKey(objectKey). - SetStatus(types.StatusStored). + SetStatus(types.AIPStatusStored). SetLocation(l1). SaveX(context.Background()) - err := c.UpdatePackageLocationID(context.Background(), p.AipID, l2.UUID) + err := c.UpdateAIPLocationID(context.Background(), a.AipID, l2.UUID) assert.NilError(t, err) - entc.Pkg.Query(). + entc.AIP.Query(). Where( - pkg.ID(p.ID), - pkg.LocationID(l2.ID), + aip.ID(a.ID), + aip.LocationID(l2.ID), ).OnlyX(context.Background()) } @@ -550,7 +550,7 @@ func TestReadLocation(t *testing.T) { }) } -func TestLocationPackages(t *testing.T) { +func TestLocationAIPs(t *testing.T) { t.Parallel() t.Run("Returns valid result", func(t *testing.T) { @@ -570,19 +570,19 @@ func TestLocationPackages(t *testing.T) { }). SaveX(context.Background()) - entc.Pkg.Create(). - SetName("Package"). + entc.AIP.Create(). + SetName("AIP"). SetAipID(aipID). SetObjectKey(objectKey). - SetStatus(types.StatusStored). + SetStatus(types.AIPStatusStored). SetLocation(l). SaveX(context.Background()) - pkgs, err := c.LocationPackages(context.Background(), locationID) + aips, err := c.LocationAIPs(context.Background(), locationID) assert.NilError(t, err) - assert.DeepEqual(t, pkgs, goastorage.PackageCollection{ + assert.DeepEqual(t, aips, goastorage.PackageCollection{ { - Name: "Package", + Name: "AIP", AipID: aipID, Status: "stored", ObjectKey: objectKey, @@ -610,9 +610,9 @@ func TestLocationPackages(t *testing.T) { }). SaveX(context.Background()) - pkgs, err := c.LocationPackages(context.Background(), locationID) + aips, err := c.LocationAIPs(context.Background(), locationID) assert.NilError(t, err) - assert.Assert(t, len(pkgs) == 0) + assert.Assert(t, len(aips) == 0) }) t.Run("Returns empty result if location does not exist", func(t *testing.T) { @@ -620,8 +620,8 @@ func TestLocationPackages(t *testing.T) { _, c := setUpClient(t) - pkgs, err := c.LocationPackages(context.Background(), uuid.Nil) + aips, err := c.LocationAIPs(context.Background(), uuid.Nil) assert.NilError(t, err) - assert.Assert(t, len(pkgs) == 0) + assert.Assert(t, len(aips) == 0) }) } diff --git a/internal/storage/persistence/ent/db/pkg.go b/internal/storage/persistence/ent/db/aip.go similarity index 63% rename from internal/storage/persistence/ent/db/pkg.go rename to internal/storage/persistence/ent/db/aip.go index 715acb509..f75e77acc 100644 --- a/internal/storage/persistence/ent/db/pkg.go +++ b/internal/storage/persistence/ent/db/aip.go @@ -9,14 +9,14 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/types" "github.com/google/uuid" ) -// Pkg is the model entity for the Pkg schema. -type Pkg struct { +// AIP is the model entity for the AIP schema. +type AIP struct { config `json:"-"` // ID of the ent. 
ID int `json:"id,omitempty"` @@ -27,19 +27,19 @@ type Pkg struct { // LocationID holds the value of the "location_id" field. LocationID int `json:"location_id,omitempty"` // Status holds the value of the "status" field. - Status types.PackageStatus `json:"status,omitempty"` + Status types.AIPStatus `json:"status,omitempty"` // ObjectKey holds the value of the "object_key" field. ObjectKey uuid.UUID `json:"object_key,omitempty"` // CreatedAt holds the value of the "created_at" field. CreatedAt time.Time `json:"created_at,omitempty"` // Edges holds the relations/edges for other nodes in the graph. - // The values are being populated by the PkgQuery when eager-loading is set. - Edges PkgEdges `json:"edges"` + // The values are being populated by the AIPQuery when eager-loading is set. + Edges AIPEdges `json:"edges"` selectValues sql.SelectValues } -// PkgEdges holds the relations/edges for other nodes in the graph. -type PkgEdges struct { +// AIPEdges holds the relations/edges for other nodes in the graph. +type AIPEdges struct { // Location holds the value of the location edge. Location *Location `json:"location,omitempty"` // loadedTypes holds the information for reporting if a @@ -49,7 +49,7 @@ type PkgEdges struct { // LocationOrErr returns the Location value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. -func (e PkgEdges) LocationOrErr() (*Location, error) { +func (e AIPEdges) LocationOrErr() (*Location, error) { if e.Location != nil { return e.Location, nil } else if e.loadedTypes[0] { @@ -59,19 +59,19 @@ func (e PkgEdges) LocationOrErr() (*Location, error) { } // scanValues returns the types for scanning values from sql.Rows. -func (*Pkg) scanValues(columns []string) ([]any, error) { +func (*AIP) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case pkg.FieldID, pkg.FieldLocationID: + case aip.FieldID, aip.FieldLocationID: values[i] = new(sql.NullInt64) - case pkg.FieldName: + case aip.FieldName: values[i] = new(sql.NullString) - case pkg.FieldCreatedAt: + case aip.FieldCreatedAt: values[i] = new(sql.NullTime) - case pkg.FieldStatus: - values[i] = new(types.PackageStatus) - case pkg.FieldAipID, pkg.FieldObjectKey: + case aip.FieldStatus: + values[i] = new(types.AIPStatus) + case aip.FieldAipID, aip.FieldObjectKey: values[i] = new(uuid.UUID) default: values[i] = new(sql.UnknownType) @@ -81,116 +81,116 @@ func (*Pkg) scanValues(columns []string) ([]any, error) { } // assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the Pkg fields. -func (pk *Pkg) assignValues(columns []string, values []any) error { +// to the AIP fields. 
+func (a *AIP) assignValues(columns []string, values []any) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } for i := range columns { switch columns[i] { - case pkg.FieldID: + case aip.FieldID: value, ok := values[i].(*sql.NullInt64) if !ok { return fmt.Errorf("unexpected type %T for field id", value) } - pk.ID = int(value.Int64) - case pkg.FieldName: + a.ID = int(value.Int64) + case aip.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) } else if value.Valid { - pk.Name = value.String + a.Name = value.String } - case pkg.FieldAipID: + case aip.FieldAipID: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field aip_id", values[i]) } else if value != nil { - pk.AipID = *value + a.AipID = *value } - case pkg.FieldLocationID: + case aip.FieldLocationID: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field location_id", values[i]) } else if value.Valid { - pk.LocationID = int(value.Int64) + a.LocationID = int(value.Int64) } - case pkg.FieldStatus: - if value, ok := values[i].(*types.PackageStatus); !ok { + case aip.FieldStatus: + if value, ok := values[i].(*types.AIPStatus); !ok { return fmt.Errorf("unexpected type %T for field status", values[i]) } else if value != nil { - pk.Status = *value + a.Status = *value } - case pkg.FieldObjectKey: + case aip.FieldObjectKey: if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field object_key", values[i]) } else if value != nil { - pk.ObjectKey = *value + a.ObjectKey = *value } - case pkg.FieldCreatedAt: + case aip.FieldCreatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - pk.CreatedAt = value.Time + a.CreatedAt = value.Time } default: - pk.selectValues.Set(columns[i], values[i]) + a.selectValues.Set(columns[i], values[i]) } } return nil } -// Value returns the ent.Value that was dynamically selected and assigned to the Pkg. +// Value returns the ent.Value that was dynamically selected and assigned to the AIP. // This includes values selected through modifiers, order, etc. -func (pk *Pkg) Value(name string) (ent.Value, error) { - return pk.selectValues.Get(name) +func (a *AIP) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) } -// QueryLocation queries the "location" edge of the Pkg entity. -func (pk *Pkg) QueryLocation() *LocationQuery { - return NewPkgClient(pk.config).QueryLocation(pk) +// QueryLocation queries the "location" edge of the AIP entity. +func (a *AIP) QueryLocation() *LocationQuery { + return NewAIPClient(a.config).QueryLocation(a) } -// Update returns a builder for updating this Pkg. -// Note that you need to call Pkg.Unwrap() before calling this method if this Pkg +// Update returns a builder for updating this AIP. +// Note that you need to call AIP.Unwrap() before calling this method if this AIP // was returned from a transaction, and the transaction was committed or rolled back. 
-func (pk *Pkg) Update() *PkgUpdateOne { - return NewPkgClient(pk.config).UpdateOne(pk) +func (a *AIP) Update() *AIPUpdateOne { + return NewAIPClient(a.config).UpdateOne(a) } -// Unwrap unwraps the Pkg entity that was returned from a transaction after it was closed, +// Unwrap unwraps the AIP entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. -func (pk *Pkg) Unwrap() *Pkg { - _tx, ok := pk.config.driver.(*txDriver) +func (a *AIP) Unwrap() *AIP { + _tx, ok := a.config.driver.(*txDriver) if !ok { - panic("db: Pkg is not a transactional entity") + panic("db: AIP is not a transactional entity") } - pk.config.driver = _tx.drv - return pk + a.config.driver = _tx.drv + return a } // String implements the fmt.Stringer. -func (pk *Pkg) String() string { +func (a *AIP) String() string { var builder strings.Builder - builder.WriteString("Pkg(") - builder.WriteString(fmt.Sprintf("id=%v, ", pk.ID)) + builder.WriteString("AIP(") + builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) builder.WriteString("name=") - builder.WriteString(pk.Name) + builder.WriteString(a.Name) builder.WriteString(", ") builder.WriteString("aip_id=") - builder.WriteString(fmt.Sprintf("%v", pk.AipID)) + builder.WriteString(fmt.Sprintf("%v", a.AipID)) builder.WriteString(", ") builder.WriteString("location_id=") - builder.WriteString(fmt.Sprintf("%v", pk.LocationID)) + builder.WriteString(fmt.Sprintf("%v", a.LocationID)) builder.WriteString(", ") builder.WriteString("status=") - builder.WriteString(fmt.Sprintf("%v", pk.Status)) + builder.WriteString(fmt.Sprintf("%v", a.Status)) builder.WriteString(", ") builder.WriteString("object_key=") - builder.WriteString(fmt.Sprintf("%v", pk.ObjectKey)) + builder.WriteString(fmt.Sprintf("%v", a.ObjectKey)) builder.WriteString(", ") builder.WriteString("created_at=") - builder.WriteString(pk.CreatedAt.Format(time.ANSIC)) + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) builder.WriteByte(')') return builder.String() } -// Pkgs is a parsable slice of Pkg. -type Pkgs []*Pkg +// AIPs is a parsable slice of AIP. +type AIPs []*AIP diff --git a/internal/storage/persistence/ent/db/pkg/pkg.go b/internal/storage/persistence/ent/db/aip/aip.go similarity index 90% rename from internal/storage/persistence/ent/db/pkg/pkg.go rename to internal/storage/persistence/ent/db/aip/aip.go index 279e4e418..679e20360 100644 --- a/internal/storage/persistence/ent/db/pkg/pkg.go +++ b/internal/storage/persistence/ent/db/aip/aip.go @@ -1,6 +1,6 @@ // Code generated by ent, DO NOT EDIT. -package pkg +package aip import ( "fmt" @@ -12,8 +12,8 @@ import ( ) const ( - // Label holds the string label denoting the pkg type in the database. - Label = "pkg" + // Label holds the string label denoting the aip type in the database. + Label = "aip" // FieldID holds the string denoting the id field in the database. FieldID = "id" // FieldName holds the string denoting the name field in the database. @@ -30,10 +30,10 @@ const ( FieldCreatedAt = "created_at" // EdgeLocation holds the string denoting the location edge name in mutations. EdgeLocation = "location" - // Table holds the table name of the pkg in the database. - Table = "package" + // Table holds the table name of the aip in the database. + Table = "aip" // LocationTable is the table that holds the location relation/edge. - LocationTable = "package" + LocationTable = "aip" // LocationInverseTable is the table name for the Location entity. 
// It exists in this package in order to avoid circular dependency with the "location" package. LocationInverseTable = "location" @@ -41,7 +41,7 @@ const ( LocationColumn = "location_id" ) -// Columns holds all SQL columns for pkg fields. +// Columns holds all SQL columns for aip fields. var Columns = []string{ FieldID, FieldName, @@ -68,16 +68,16 @@ var ( ) // StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. -func StatusValidator(s types.PackageStatus) error { +func StatusValidator(s types.AIPStatus) error { switch s.String() { case "unspecified", "in_review", "rejected", "stored", "moving": return nil default: - return fmt.Errorf("pkg: invalid enum value for status field: %q", s) + return fmt.Errorf("aip: invalid enum value for status field: %q", s) } } -// OrderOption defines the ordering options for the Pkg queries. +// OrderOption defines the ordering options for the AIP queries. type OrderOption func(*sql.Selector) // ByID orders the results by the id field. diff --git a/internal/storage/persistence/ent/db/aip/where.go b/internal/storage/persistence/ent/db/aip/where.go new file mode 100644 index 000000000..c95f7cd15 --- /dev/null +++ b/internal/storage/persistence/ent/db/aip/where.go @@ -0,0 +1,356 @@ +// Code generated by ent, DO NOT EDIT. + +package aip + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" + "github.com/artefactual-sdps/enduro/internal/storage/types" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.AIP { + return predicate.AIP(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.AIP { + return predicate.AIP(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.AIP { + return predicate.AIP(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.AIP { + return predicate.AIP(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldName, v)) +} + +// AipID applies equality check predicate on the "aip_id" field. It's identical to AipIDEQ. +func AipID(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldAipID, v)) +} + +// LocationID applies equality check predicate on the "location_id" field. It's identical to LocationIDEQ. 
+func LocationID(v int) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldLocationID, v)) +} + +// ObjectKey applies equality check predicate on the "object_key" field. It's identical to ObjectKeyEQ. +func ObjectKey(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldObjectKey, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldCreatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.AIP { + return predicate.AIP(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.AIP { + return predicate.AIP(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.AIP { + return predicate.AIP(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.AIP { + return predicate.AIP(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.AIP { + return predicate.AIP(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.AIP { + return predicate.AIP(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.AIP { + return predicate.AIP(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.AIP { + return predicate.AIP(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.AIP { + return predicate.AIP(sql.FieldContainsFold(FieldName, v)) +} + +// AipIDEQ applies the EQ predicate on the "aip_id" field. +func AipIDEQ(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldAipID, v)) +} + +// AipIDNEQ applies the NEQ predicate on the "aip_id" field. +func AipIDNEQ(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldAipID, v)) +} + +// AipIDIn applies the In predicate on the "aip_id" field. +func AipIDIn(vs ...uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldAipID, vs...)) +} + +// AipIDNotIn applies the NotIn predicate on the "aip_id" field. +func AipIDNotIn(vs ...uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldAipID, vs...)) +} + +// AipIDGT applies the GT predicate on the "aip_id" field. 
+func AipIDGT(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldGT(FieldAipID, v)) +} + +// AipIDGTE applies the GTE predicate on the "aip_id" field. +func AipIDGTE(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldGTE(FieldAipID, v)) +} + +// AipIDLT applies the LT predicate on the "aip_id" field. +func AipIDLT(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldLT(FieldAipID, v)) +} + +// AipIDLTE applies the LTE predicate on the "aip_id" field. +func AipIDLTE(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldLTE(FieldAipID, v)) +} + +// LocationIDEQ applies the EQ predicate on the "location_id" field. +func LocationIDEQ(v int) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldLocationID, v)) +} + +// LocationIDNEQ applies the NEQ predicate on the "location_id" field. +func LocationIDNEQ(v int) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldLocationID, v)) +} + +// LocationIDIn applies the In predicate on the "location_id" field. +func LocationIDIn(vs ...int) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldLocationID, vs...)) +} + +// LocationIDNotIn applies the NotIn predicate on the "location_id" field. +func LocationIDNotIn(vs ...int) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldLocationID, vs...)) +} + +// LocationIDIsNil applies the IsNil predicate on the "location_id" field. +func LocationIDIsNil() predicate.AIP { + return predicate.AIP(sql.FieldIsNull(FieldLocationID)) +} + +// LocationIDNotNil applies the NotNil predicate on the "location_id" field. +func LocationIDNotNil() predicate.AIP { + return predicate.AIP(sql.FieldNotNull(FieldLocationID)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v types.AIPStatus) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v types.AIPStatus) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...types.AIPStatus) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...types.AIPStatus) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldStatus, vs...)) +} + +// ObjectKeyEQ applies the EQ predicate on the "object_key" field. +func ObjectKeyEQ(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldObjectKey, v)) +} + +// ObjectKeyNEQ applies the NEQ predicate on the "object_key" field. +func ObjectKeyNEQ(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldObjectKey, v)) +} + +// ObjectKeyIn applies the In predicate on the "object_key" field. +func ObjectKeyIn(vs ...uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldObjectKey, vs...)) +} + +// ObjectKeyNotIn applies the NotIn predicate on the "object_key" field. +func ObjectKeyNotIn(vs ...uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldObjectKey, vs...)) +} + +// ObjectKeyGT applies the GT predicate on the "object_key" field. +func ObjectKeyGT(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldGT(FieldObjectKey, v)) +} + +// ObjectKeyGTE applies the GTE predicate on the "object_key" field. +func ObjectKeyGTE(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldGTE(FieldObjectKey, v)) +} + +// ObjectKeyLT applies the LT predicate on the "object_key" field. 
+func ObjectKeyLT(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldLT(FieldObjectKey, v)) +} + +// ObjectKeyLTE applies the LTE predicate on the "object_key" field. +func ObjectKeyLTE(v uuid.UUID) predicate.AIP { + return predicate.AIP(sql.FieldLTE(FieldObjectKey, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AIP { + return predicate.AIP(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AIP { + return predicate.AIP(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AIP { + return predicate.AIP(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasLocation applies the HasEdge predicate on the "location" edge. +func HasLocation() predicate.AIP { + return predicate.AIP(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, LocationTable, LocationColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasLocationWith applies the HasEdge predicate on the "location" edge with a given conditions (other predicates). +func HasLocationWith(preds ...predicate.Location) predicate.AIP { + return predicate.AIP(func(s *sql.Selector) { + step := newLocationStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AIP) predicate.AIP { + return predicate.AIP(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AIP) predicate.AIP { + return predicate.AIP(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AIP) predicate.AIP { + return predicate.AIP(sql.NotPredicates(p)) +} diff --git a/internal/storage/persistence/ent/db/aip_create.go b/internal/storage/persistence/ent/db/aip_create.go new file mode 100644 index 000000000..74d711883 --- /dev/null +++ b/internal/storage/persistence/ent/db/aip_create.go @@ -0,0 +1,298 @@ +// Code generated by ent, DO NOT EDIT. 
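A short sketch (not taken from the diff) of how the generated aip predicates in where.go above compose in a query. The entc argument, the example function, the "transfer" substring, and locID are placeholders; the predicate and query-builder names are the generated ones added in this diff.

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip"
	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location"
	"github.com/artefactual-sdps/enduro/internal/storage/types"
)

// example returns stored AIPs whose name contains "transfer" (case-insensitive)
// and that live in the location identified by locID, matched through the
// location edge.
func example(ctx context.Context, entc *db.Client, locID uuid.UUID) ([]*db.AIP, error) {
	return entc.AIP.Query().
		Where(
			aip.StatusEQ(types.AIPStatusStored),
			aip.NameContainsFold("transfer"),
			aip.HasLocationWith(location.UUID(locID)),
		).
		All(ctx)
}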
+ +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" + "github.com/artefactual-sdps/enduro/internal/storage/types" + "github.com/google/uuid" +) + +// AIPCreate is the builder for creating a AIP entity. +type AIPCreate struct { + config + mutation *AIPMutation + hooks []Hook +} + +// SetName sets the "name" field. +func (ac *AIPCreate) SetName(s string) *AIPCreate { + ac.mutation.SetName(s) + return ac +} + +// SetAipID sets the "aip_id" field. +func (ac *AIPCreate) SetAipID(u uuid.UUID) *AIPCreate { + ac.mutation.SetAipID(u) + return ac +} + +// SetLocationID sets the "location_id" field. +func (ac *AIPCreate) SetLocationID(i int) *AIPCreate { + ac.mutation.SetLocationID(i) + return ac +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. +func (ac *AIPCreate) SetNillableLocationID(i *int) *AIPCreate { + if i != nil { + ac.SetLocationID(*i) + } + return ac +} + +// SetStatus sets the "status" field. +func (ac *AIPCreate) SetStatus(ts types.AIPStatus) *AIPCreate { + ac.mutation.SetStatus(ts) + return ac +} + +// SetObjectKey sets the "object_key" field. +func (ac *AIPCreate) SetObjectKey(u uuid.UUID) *AIPCreate { + ac.mutation.SetObjectKey(u) + return ac +} + +// SetCreatedAt sets the "created_at" field. +func (ac *AIPCreate) SetCreatedAt(t time.Time) *AIPCreate { + ac.mutation.SetCreatedAt(t) + return ac +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (ac *AIPCreate) SetNillableCreatedAt(t *time.Time) *AIPCreate { + if t != nil { + ac.SetCreatedAt(*t) + } + return ac +} + +// SetLocation sets the "location" edge to the Location entity. +func (ac *AIPCreate) SetLocation(l *Location) *AIPCreate { + return ac.SetLocationID(l.ID) +} + +// Mutation returns the AIPMutation object of the builder. +func (ac *AIPCreate) Mutation() *AIPMutation { + return ac.mutation +} + +// Save creates the AIP in the database. +func (ac *AIPCreate) Save(ctx context.Context) (*AIP, error) { + ac.defaults() + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (ac *AIPCreate) SaveX(ctx context.Context) *AIP { + v, err := ac.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ac *AIPCreate) Exec(ctx context.Context) error { + _, err := ac.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ac *AIPCreate) ExecX(ctx context.Context) { + if err := ac.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (ac *AIPCreate) defaults() { + if _, ok := ac.mutation.CreatedAt(); !ok { + v := aip.DefaultCreatedAt() + ac.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ac *AIPCreate) check() error { + if _, ok := ac.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`db: missing required field "AIP.name"`)} + } + if _, ok := ac.mutation.AipID(); !ok { + return &ValidationError{Name: "aip_id", err: errors.New(`db: missing required field "AIP.aip_id"`)} + } + if _, ok := ac.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`db: missing required field "AIP.status"`)} + } + if v, ok := ac.mutation.Status(); ok { + if err := aip.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "AIP.status": %w`, err)} + } + } + if _, ok := ac.mutation.ObjectKey(); !ok { + return &ValidationError{Name: "object_key", err: errors.New(`db: missing required field "AIP.object_key"`)} + } + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "AIP.created_at"`)} + } + return nil +} + +func (ac *AIPCreate) sqlSave(ctx context.Context) (*AIP, error) { + if err := ac.check(); err != nil { + return nil, err + } + _node, _spec := ac.createSpec() + if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + ac.mutation.id = &_node.ID + ac.mutation.done = true + return _node, nil +} + +func (ac *AIPCreate) createSpec() (*AIP, *sqlgraph.CreateSpec) { + var ( + _node = &AIP{config: ac.config} + _spec = sqlgraph.NewCreateSpec(aip.Table, sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt)) + ) + if value, ok := ac.mutation.Name(); ok { + _spec.SetField(aip.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := ac.mutation.AipID(); ok { + _spec.SetField(aip.FieldAipID, field.TypeUUID, value) + _node.AipID = value + } + if value, ok := ac.mutation.Status(); ok { + _spec.SetField(aip.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := ac.mutation.ObjectKey(); ok { + _spec.SetField(aip.FieldObjectKey, field.TypeUUID, value) + _node.ObjectKey = value + } + if value, ok := ac.mutation.CreatedAt(); ok { + _spec.SetField(aip.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := ac.mutation.LocationIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: aip.LocationTable, + Columns: []string{aip.LocationColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.LocationID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// AIPCreateBulk is the builder for creating many AIP entities in bulk. +type AIPCreateBulk struct { + config + err error + builders []*AIPCreate +} + +// Save creates the AIP entities in the database. 
+func (acb *AIPCreateBulk) Save(ctx context.Context) ([]*AIP, error) { + if acb.err != nil { + return nil, acb.err + } + specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) + nodes := make([]*AIP, len(acb.builders)) + mutators := make([]Mutator, len(acb.builders)) + for i := range acb.builders { + func(i int, root context.Context) { + builder := acb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AIPMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (acb *AIPCreateBulk) SaveX(ctx context.Context) []*AIP { + v, err := acb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (acb *AIPCreateBulk) Exec(ctx context.Context) error { + _, err := acb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (acb *AIPCreateBulk) ExecX(ctx context.Context) { + if err := acb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/storage/persistence/ent/db/aip_delete.go b/internal/storage/persistence/ent/db/aip_delete.go new file mode 100644 index 000000000..01e1698f0 --- /dev/null +++ b/internal/storage/persistence/ent/db/aip_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" +) + +// AIPDelete is the builder for deleting a AIP entity. +type AIPDelete struct { + config + hooks []Hook + mutation *AIPMutation +} + +// Where appends a list predicates to the AIPDelete builder. +func (ad *AIPDelete) Where(ps ...predicate.AIP) *AIPDelete { + ad.mutation.Where(ps...) + return ad +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ad *AIPDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
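A short sketch (not taken from the diff) of the AIPCreate builder added above. The entc argument, the example function, and the literal values are placeholders; name, aip_id, status, and object_key are the fields required by check(), and created_at falls back to the builder default when not set.

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/storage/types"
)

func example(ctx context.Context, entc *db.Client) (*db.AIP, error) {
	// The location edge is optional; it can be attached here with SetLocation or
	// SetNillableLocationID, or left unset as below.
	return entc.AIP.Create().
		SetName("example-aip").
		SetAipID(uuid.New()).
		SetObjectKey(uuid.New()).
		SetStatus(types.AIPStatusStored).
		Save(ctx)
}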
+func (ad *AIPDelete) ExecX(ctx context.Context) int { + n, err := ad.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ad *AIPDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(aip.Table, sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt)) + if ps := ad.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ad.mutation.done = true + return affected, err +} + +// AIPDeleteOne is the builder for deleting a single AIP entity. +type AIPDeleteOne struct { + ad *AIPDelete +} + +// Where appends a list predicates to the AIPDelete builder. +func (ado *AIPDeleteOne) Where(ps ...predicate.AIP) *AIPDeleteOne { + ado.ad.mutation.Where(ps...) + return ado +} + +// Exec executes the deletion query. +func (ado *AIPDeleteOne) Exec(ctx context.Context) error { + n, err := ado.ad.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{aip.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ado *AIPDeleteOne) ExecX(ctx context.Context) { + if err := ado.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/storage/persistence/ent/db/aip_query.go b/internal/storage/persistence/ent/db/aip_query.go new file mode 100644 index 000000000..f534e22af --- /dev/null +++ b/internal/storage/persistence/ent/db/aip_query.go @@ -0,0 +1,606 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" +) + +// AIPQuery is the builder for querying AIP entities. +type AIPQuery struct { + config + ctx *QueryContext + order []aip.OrderOption + inters []Interceptor + predicates []predicate.AIP + withLocation *LocationQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AIPQuery builder. +func (aq *AIPQuery) Where(ps ...predicate.AIP) *AIPQuery { + aq.predicates = append(aq.predicates, ps...) + return aq +} + +// Limit the number of records to be returned by this query. +func (aq *AIPQuery) Limit(limit int) *AIPQuery { + aq.ctx.Limit = &limit + return aq +} + +// Offset to start from. +func (aq *AIPQuery) Offset(offset int) *AIPQuery { + aq.ctx.Offset = &offset + return aq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (aq *AIPQuery) Unique(unique bool) *AIPQuery { + aq.ctx.Unique = &unique + return aq +} + +// Order specifies how the records should be ordered. +func (aq *AIPQuery) Order(o ...aip.OrderOption) *AIPQuery { + aq.order = append(aq.order, o...) + return aq +} + +// QueryLocation chains the current query on the "location" edge. 
+func (aq *AIPQuery) QueryLocation() *LocationQuery { + query := (&LocationClient{config: aq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(aip.Table, aip.FieldID, selector), + sqlgraph.To(location.Table, location.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, aip.LocationTable, aip.LocationColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AIP entity from the query. +// Returns a *NotFoundError when no AIP was found. +func (aq *AIPQuery) First(ctx context.Context) (*AIP, error) { + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{aip.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aq *AIPQuery) FirstX(ctx context.Context) *AIP { + node, err := aq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AIP ID from the query. +// Returns a *NotFoundError when no AIP ID was found. +func (aq *AIPQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{aip.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (aq *AIPQuery) FirstIDX(ctx context.Context) int { + id, err := aq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AIP entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AIP entity is found. +// Returns a *NotFoundError when no AIP entities are found. +func (aq *AIPQuery) Only(ctx context.Context) (*AIP, error) { + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{aip.Label} + default: + return nil, &NotSingularError{aip.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aq *AIPQuery) OnlyX(ctx context.Context) *AIP { + node, err := aq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AIP ID in the query. +// Returns a *NotSingularError when more than one AIP ID is found. +// Returns a *NotFoundError when no entities are found. +func (aq *AIPQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{aip.Label} + default: + err = &NotSingularError{aip.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aq *AIPQuery) OnlyIDX(ctx context.Context) int { + id, err := aq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AIPs. 
+func (aq *AIPQuery) All(ctx context.Context) ([]*AIP, error) { + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryAll) + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AIP, *AIPQuery]() + return withInterceptors[[]*AIP](ctx, aq, qr, aq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (aq *AIPQuery) AllX(ctx context.Context) []*AIP { + nodes, err := aq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AIP IDs. +func (aq *AIPQuery) IDs(ctx context.Context) (ids []int, err error) { + if aq.ctx.Unique == nil && aq.path != nil { + aq.Unique(true) + } + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryIDs) + if err = aq.Select(aip.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aq *AIPQuery) IDsX(ctx context.Context) []int { + ids, err := aq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aq *AIPQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryCount) + if err := aq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, aq, querierCount[*AIPQuery](), aq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (aq *AIPQuery) CountX(ctx context.Context) int { + count, err := aq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (aq *AIPQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryExist) + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (aq *AIPQuery) ExistX(ctx context.Context) bool { + exist, err := aq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AIPQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aq *AIPQuery) Clone() *AIPQuery { + if aq == nil { + return nil + } + return &AIPQuery{ + config: aq.config, + ctx: aq.ctx.Clone(), + order: append([]aip.OrderOption{}, aq.order...), + inters: append([]Interceptor{}, aq.inters...), + predicates: append([]predicate.AIP{}, aq.predicates...), + withLocation: aq.withLocation.Clone(), + // clone intermediate query. + sql: aq.sql.Clone(), + path: aq.path, + } +} + +// WithLocation tells the query-builder to eager-load the nodes that are connected to +// the "location" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AIPQuery) WithLocation(opts ...func(*LocationQuery)) *AIPQuery { + query := (&LocationClient{config: aq.config}).Query() + for _, opt := range opts { + opt(query) + } + aq.withLocation = query + return aq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AIP.Query(). +// GroupBy(aip.FieldName). +// Aggregate(db.Count()). 
+// Scan(ctx, &v) +func (aq *AIPQuery) GroupBy(field string, fields ...string) *AIPGroupBy { + aq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AIPGroupBy{build: aq} + grbuild.flds = &aq.ctx.Fields + grbuild.label = aip.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.AIP.Query(). +// Select(aip.FieldName). +// Scan(ctx, &v) +func (aq *AIPQuery) Select(fields ...string) *AIPSelect { + aq.ctx.Fields = append(aq.ctx.Fields, fields...) + sbuild := &AIPSelect{AIPQuery: aq} + sbuild.label = aip.Label + sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AIPSelect configured with the given aggregations. +func (aq *AIPQuery) Aggregate(fns ...AggregateFunc) *AIPSelect { + return aq.Select().Aggregate(fns...) +} + +func (aq *AIPQuery) prepareQuery(ctx context.Context) error { + for _, inter := range aq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aq); err != nil { + return err + } + } + } + for _, f := range aq.ctx.Fields { + if !aip.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if aq.path != nil { + prev, err := aq.path(ctx) + if err != nil { + return err + } + aq.sql = prev + } + return nil +} + +func (aq *AIPQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AIP, error) { + var ( + nodes = []*AIP{} + _spec = aq.querySpec() + loadedTypes = [1]bool{ + aq.withLocation != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AIP).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AIP{config: aq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := aq.withLocation; query != nil { + if err := aq.loadLocation(ctx, query, nodes, nil, + func(n *AIP, e *Location) { n.Edges.Location = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (aq *AIPQuery) loadLocation(ctx context.Context, query *LocationQuery, nodes []*AIP, init func(*AIP), assign func(*AIP, *Location)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*AIP) + for i := range nodes { + fk := nodes[i].LocationID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(location.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "location_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (aq *AIPQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + _spec.Node.Columns = aq.ctx.Fields + if len(aq.ctx.Fields) > 0 { + _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, 
aq.driver, _spec) +} + +func (aq *AIPQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(aip.Table, aip.Columns, sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt)) + _spec.From = aq.sql + if unique := aq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if aq.path != nil { + _spec.Unique = true + } + if fields := aq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, aip.FieldID) + for i := range fields { + if fields[i] != aip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if aq.withLocation != nil { + _spec.Node.AddColumnOnce(aip.FieldLocationID) + } + } + if ps := aq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := aq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := aq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (aq *AIPQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(aq.driver.Dialect()) + t1 := builder.Table(aip.Table) + columns := aq.ctx.Fields + if len(columns) == 0 { + columns = aip.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if aq.sql != nil { + selector = aq.sql + selector.Select(selector.Columns(columns...)...) + } + if aq.ctx.Unique != nil && *aq.ctx.Unique { + selector.Distinct() + } + for _, p := range aq.predicates { + p(selector) + } + for _, p := range aq.order { + p(selector) + } + if offset := aq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AIPGroupBy is the group-by builder for AIP entities. +type AIPGroupBy struct { + selector + build *AIPQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (agb *AIPGroupBy) Aggregate(fns ...AggregateFunc) *AIPGroupBy { + agb.fns = append(agb.fns, fns...) + return agb +} + +// Scan applies the selector query and scans the result into the given value. +func (agb *AIPGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, agb.build.ctx, ent.OpQueryGroupBy) + if err := agb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AIPQuery, *AIPGroupBy](ctx, agb.build, agb, agb.build.inters, v) +} + +func (agb *AIPGroupBy) sqlScan(ctx context.Context, root *AIPQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) + for _, f := range *agb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*agb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AIPSelect is the builder for selecting fields of AIP entities. +type AIPSelect struct { + *AIPQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (as *AIPSelect) Aggregate(fns ...AggregateFunc) *AIPSelect { + as.fns = append(as.fns, fns...) + return as +} + +// Scan applies the selector query and scans the result into the given value. +func (as *AIPSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, as.ctx, ent.OpQuerySelect) + if err := as.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AIPQuery, *AIPSelect](ctx, as.AIPQuery, as, as.inters, v) +} + +func (as *AIPSelect) sqlScan(ctx context.Context, root *AIPQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(as.fns)) + for _, fn := range as.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*as.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := as.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/storage/persistence/ent/db/aip_update.go b/internal/storage/persistence/ent/db/aip_update.go new file mode 100644 index 000000000..b9d68e721 --- /dev/null +++ b/internal/storage/persistence/ent/db/aip_update.go @@ -0,0 +1,460 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" + "github.com/artefactual-sdps/enduro/internal/storage/types" + "github.com/google/uuid" +) + +// AIPUpdate is the builder for updating AIP entities. +type AIPUpdate struct { + config + hooks []Hook + mutation *AIPMutation +} + +// Where appends a list predicates to the AIPUpdate builder. +func (au *AIPUpdate) Where(ps ...predicate.AIP) *AIPUpdate { + au.mutation.Where(ps...) + return au +} + +// SetName sets the "name" field. +func (au *AIPUpdate) SetName(s string) *AIPUpdate { + au.mutation.SetName(s) + return au +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (au *AIPUpdate) SetNillableName(s *string) *AIPUpdate { + if s != nil { + au.SetName(*s) + } + return au +} + +// SetAipID sets the "aip_id" field. +func (au *AIPUpdate) SetAipID(u uuid.UUID) *AIPUpdate { + au.mutation.SetAipID(u) + return au +} + +// SetNillableAipID sets the "aip_id" field if the given value is not nil. +func (au *AIPUpdate) SetNillableAipID(u *uuid.UUID) *AIPUpdate { + if u != nil { + au.SetAipID(*u) + } + return au +} + +// SetLocationID sets the "location_id" field. +func (au *AIPUpdate) SetLocationID(i int) *AIPUpdate { + au.mutation.SetLocationID(i) + return au +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. 
+func (au *AIPUpdate) SetNillableLocationID(i *int) *AIPUpdate { + if i != nil { + au.SetLocationID(*i) + } + return au +} + +// ClearLocationID clears the value of the "location_id" field. +func (au *AIPUpdate) ClearLocationID() *AIPUpdate { + au.mutation.ClearLocationID() + return au +} + +// SetStatus sets the "status" field. +func (au *AIPUpdate) SetStatus(ts types.AIPStatus) *AIPUpdate { + au.mutation.SetStatus(ts) + return au +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (au *AIPUpdate) SetNillableStatus(ts *types.AIPStatus) *AIPUpdate { + if ts != nil { + au.SetStatus(*ts) + } + return au +} + +// SetObjectKey sets the "object_key" field. +func (au *AIPUpdate) SetObjectKey(u uuid.UUID) *AIPUpdate { + au.mutation.SetObjectKey(u) + return au +} + +// SetNillableObjectKey sets the "object_key" field if the given value is not nil. +func (au *AIPUpdate) SetNillableObjectKey(u *uuid.UUID) *AIPUpdate { + if u != nil { + au.SetObjectKey(*u) + } + return au +} + +// SetLocation sets the "location" edge to the Location entity. +func (au *AIPUpdate) SetLocation(l *Location) *AIPUpdate { + return au.SetLocationID(l.ID) +} + +// Mutation returns the AIPMutation object of the builder. +func (au *AIPUpdate) Mutation() *AIPMutation { + return au.mutation +} + +// ClearLocation clears the "location" edge to the Location entity. +func (au *AIPUpdate) ClearLocation() *AIPUpdate { + au.mutation.ClearLocation() + return au +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (au *AIPUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (au *AIPUpdate) SaveX(ctx context.Context) int { + affected, err := au.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (au *AIPUpdate) Exec(ctx context.Context) error { + _, err := au.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (au *AIPUpdate) ExecX(ctx context.Context) { + if err := au.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (au *AIPUpdate) check() error { + if v, ok := au.mutation.Status(); ok { + if err := aip.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "AIP.status": %w`, err)} + } + } + return nil +} + +func (au *AIPUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := au.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(aip.Table, aip.Columns, sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt)) + if ps := au.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := au.mutation.Name(); ok { + _spec.SetField(aip.FieldName, field.TypeString, value) + } + if value, ok := au.mutation.AipID(); ok { + _spec.SetField(aip.FieldAipID, field.TypeUUID, value) + } + if value, ok := au.mutation.Status(); ok { + _spec.SetField(aip.FieldStatus, field.TypeEnum, value) + } + if value, ok := au.mutation.ObjectKey(); ok { + _spec.SetField(aip.FieldObjectKey, field.TypeUUID, value) + } + if au.mutation.LocationCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: aip.LocationTable, + Columns: []string{aip.LocationColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.LocationIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: aip.LocationTable, + Columns: []string{aip.LocationColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{aip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + au.mutation.done = true + return n, nil +} + +// AIPUpdateOne is the builder for updating a single AIP entity. +type AIPUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AIPMutation +} + +// SetName sets the "name" field. +func (auo *AIPUpdateOne) SetName(s string) *AIPUpdateOne { + auo.mutation.SetName(s) + return auo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (auo *AIPUpdateOne) SetNillableName(s *string) *AIPUpdateOne { + if s != nil { + auo.SetName(*s) + } + return auo +} + +// SetAipID sets the "aip_id" field. +func (auo *AIPUpdateOne) SetAipID(u uuid.UUID) *AIPUpdateOne { + auo.mutation.SetAipID(u) + return auo +} + +// SetNillableAipID sets the "aip_id" field if the given value is not nil. +func (auo *AIPUpdateOne) SetNillableAipID(u *uuid.UUID) *AIPUpdateOne { + if u != nil { + auo.SetAipID(*u) + } + return auo +} + +// SetLocationID sets the "location_id" field. +func (auo *AIPUpdateOne) SetLocationID(i int) *AIPUpdateOne { + auo.mutation.SetLocationID(i) + return auo +} + +// SetNillableLocationID sets the "location_id" field if the given value is not nil. +func (auo *AIPUpdateOne) SetNillableLocationID(i *int) *AIPUpdateOne { + if i != nil { + auo.SetLocationID(*i) + } + return auo +} + +// ClearLocationID clears the value of the "location_id" field. 
+func (auo *AIPUpdateOne) ClearLocationID() *AIPUpdateOne { + auo.mutation.ClearLocationID() + return auo +} + +// SetStatus sets the "status" field. +func (auo *AIPUpdateOne) SetStatus(ts types.AIPStatus) *AIPUpdateOne { + auo.mutation.SetStatus(ts) + return auo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (auo *AIPUpdateOne) SetNillableStatus(ts *types.AIPStatus) *AIPUpdateOne { + if ts != nil { + auo.SetStatus(*ts) + } + return auo +} + +// SetObjectKey sets the "object_key" field. +func (auo *AIPUpdateOne) SetObjectKey(u uuid.UUID) *AIPUpdateOne { + auo.mutation.SetObjectKey(u) + return auo +} + +// SetNillableObjectKey sets the "object_key" field if the given value is not nil. +func (auo *AIPUpdateOne) SetNillableObjectKey(u *uuid.UUID) *AIPUpdateOne { + if u != nil { + auo.SetObjectKey(*u) + } + return auo +} + +// SetLocation sets the "location" edge to the Location entity. +func (auo *AIPUpdateOne) SetLocation(l *Location) *AIPUpdateOne { + return auo.SetLocationID(l.ID) +} + +// Mutation returns the AIPMutation object of the builder. +func (auo *AIPUpdateOne) Mutation() *AIPMutation { + return auo.mutation +} + +// ClearLocation clears the "location" edge to the Location entity. +func (auo *AIPUpdateOne) ClearLocation() *AIPUpdateOne { + auo.mutation.ClearLocation() + return auo +} + +// Where appends a list predicates to the AIPUpdate builder. +func (auo *AIPUpdateOne) Where(ps ...predicate.AIP) *AIPUpdateOne { + auo.mutation.Where(ps...) + return auo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (auo *AIPUpdateOne) Select(field string, fields ...string) *AIPUpdateOne { + auo.fields = append([]string{field}, fields...) + return auo +} + +// Save executes the query and returns the updated AIP entity. +func (auo *AIPUpdateOne) Save(ctx context.Context) (*AIP, error) { + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (auo *AIPUpdateOne) SaveX(ctx context.Context) *AIP { + node, err := auo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (auo *AIPUpdateOne) Exec(ctx context.Context) error { + _, err := auo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (auo *AIPUpdateOne) ExecX(ctx context.Context) { + if err := auo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (auo *AIPUpdateOne) check() error { + if v, ok := auo.mutation.Status(); ok { + if err := aip.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "AIP.status": %w`, err)} + } + } + return nil +} + +func (auo *AIPUpdateOne) sqlSave(ctx context.Context) (_node *AIP, err error) { + if err := auo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(aip.Table, aip.Columns, sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt)) + id, ok := auo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "AIP.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := auo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, aip.FieldID) + for _, f := range fields { + if !aip.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != aip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := auo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := auo.mutation.Name(); ok { + _spec.SetField(aip.FieldName, field.TypeString, value) + } + if value, ok := auo.mutation.AipID(); ok { + _spec.SetField(aip.FieldAipID, field.TypeUUID, value) + } + if value, ok := auo.mutation.Status(); ok { + _spec.SetField(aip.FieldStatus, field.TypeEnum, value) + } + if value, ok := auo.mutation.ObjectKey(); ok { + _spec.SetField(aip.FieldObjectKey, field.TypeUUID, value) + } + if auo.mutation.LocationCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: aip.LocationTable, + Columns: []string{aip.LocationColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.LocationIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: aip.LocationTable, + Columns: []string{aip.LocationColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AIP{config: auo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{aip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + auo.mutation.done = true + return _node, nil +} diff --git a/internal/storage/persistence/ent/db/client.go b/internal/storage/persistence/ent/db/client.go index bd7e5d88f..488eb769c 100644 --- a/internal/storage/persistence/ent/db/client.go +++ b/internal/storage/persistence/ent/db/client.go @@ -15,8 +15,8 @@ import ( "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" ) // Client is the client 
that holds all ent builders. @@ -24,10 +24,10 @@ type Client struct { config // Schema is the client for creating, migrating and dropping schema. Schema *migrate.Schema + // AIP is the client for interacting with the AIP builders. + AIP *AIPClient // Location is the client for interacting with the Location builders. Location *LocationClient - // Pkg is the client for interacting with the Pkg builders. - Pkg *PkgClient } // NewClient creates a new client configured with the given options. @@ -39,8 +39,8 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) + c.AIP = NewAIPClient(c.config) c.Location = NewLocationClient(c.config) - c.Pkg = NewPkgClient(c.config) } type ( @@ -133,8 +133,8 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { return &Tx{ ctx: ctx, config: cfg, + AIP: NewAIPClient(cfg), Location: NewLocationClient(cfg), - Pkg: NewPkgClient(cfg), }, nil } @@ -154,15 +154,15 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) return &Tx{ ctx: ctx, config: cfg, + AIP: NewAIPClient(cfg), Location: NewLocationClient(cfg), - Pkg: NewPkgClient(cfg), }, nil } // Debug returns a new debug-client. It's used to get verbose logging on specific operations. // // client.Debug(). -// Location. +// AIP. // Query(). // Count(ctx) func (c *Client) Debug() *Client { @@ -184,130 +184,130 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { + c.AIP.Use(hooks...) c.Location.Use(hooks...) - c.Pkg.Use(hooks...) } // Intercept adds the query interceptors to all the entity clients. // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { + c.AIP.Intercept(interceptors...) c.Location.Intercept(interceptors...) - c.Pkg.Intercept(interceptors...) } // Mutate implements the ent.Mutator interface. func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { switch m := m.(type) { + case *AIPMutation: + return c.AIP.mutate(ctx, m) case *LocationMutation: return c.Location.mutate(ctx, m) - case *PkgMutation: - return c.Pkg.mutate(ctx, m) default: return nil, fmt.Errorf("db: unknown mutation type %T", m) } } -// LocationClient is a client for the Location schema. -type LocationClient struct { +// AIPClient is a client for the AIP schema. +type AIPClient struct { config } -// NewLocationClient returns a client for the Location from the given config. -func NewLocationClient(c config) *LocationClient { - return &LocationClient{config: c} +// NewAIPClient returns a client for the AIP from the given config. +func NewAIPClient(c config) *AIPClient { + return &AIPClient{config: c} } // Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `location.Hooks(f(g(h())))`. -func (c *LocationClient) Use(hooks ...Hook) { - c.hooks.Location = append(c.hooks.Location, hooks...) +// A call to `Use(f, g, h)` equals to `aip.Hooks(f(g(h())))`. +func (c *AIPClient) Use(hooks ...Hook) { + c.hooks.AIP = append(c.hooks.AIP, hooks...) } // Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `location.Intercept(f(g(h())))`. -func (c *LocationClient) Intercept(interceptors ...Interceptor) { - c.inters.Location = append(c.inters.Location, interceptors...) 
+// A call to `Intercept(f, g, h)` equals to `aip.Intercept(f(g(h())))`. +func (c *AIPClient) Intercept(interceptors ...Interceptor) { + c.inters.AIP = append(c.inters.AIP, interceptors...) } -// Create returns a builder for creating a Location entity. -func (c *LocationClient) Create() *LocationCreate { - mutation := newLocationMutation(c.config, OpCreate) - return &LocationCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Create returns a builder for creating a AIP entity. +func (c *AIPClient) Create() *AIPCreate { + mutation := newAIPMutation(c.config, OpCreate) + return &AIPCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// CreateBulk returns a builder for creating a bulk of Location entities. -func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreateBulk { - return &LocationCreateBulk{config: c.config, builders: builders} +// CreateBulk returns a builder for creating a bulk of AIP entities. +func (c *AIPClient) CreateBulk(builders ...*AIPCreate) *AIPCreateBulk { + return &AIPCreateBulk{config: c.config, builders: builders} } // MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates // a builder and applies setFunc on it. -func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk { +func (c *AIPClient) MapCreateBulk(slice any, setFunc func(*AIPCreate, int)) *AIPCreateBulk { rv := reflect.ValueOf(slice) if rv.Kind() != reflect.Slice { - return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)} + return &AIPCreateBulk{err: fmt.Errorf("calling to AIPClient.MapCreateBulk with wrong type %T, need slice", slice)} } - builders := make([]*LocationCreate, rv.Len()) + builders := make([]*AIPCreate, rv.Len()) for i := 0; i < rv.Len(); i++ { builders[i] = c.Create() setFunc(builders[i], i) } - return &LocationCreateBulk{config: c.config, builders: builders} + return &AIPCreateBulk{config: c.config, builders: builders} } -// Update returns an update builder for Location. -func (c *LocationClient) Update() *LocationUpdate { - mutation := newLocationMutation(c.config, OpUpdate) - return &LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Update returns an update builder for AIP. +func (c *AIPClient) Update() *AIPUpdate { + mutation := newAIPMutation(c.config, OpUpdate) + return &AIPUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} } // UpdateOne returns an update builder for the given entity. -func (c *LocationClient) UpdateOne(l *Location) *LocationUpdateOne { - mutation := newLocationMutation(c.config, OpUpdateOne, withLocation(l)) - return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +func (c *AIPClient) UpdateOne(a *AIP) *AIPUpdateOne { + mutation := newAIPMutation(c.config, OpUpdateOne, withAIP(a)) + return &AIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } // UpdateOneID returns an update builder for the given id. 
-func (c *LocationClient) UpdateOneID(id int) *LocationUpdateOne { - mutation := newLocationMutation(c.config, OpUpdateOne, withLocationID(id)) - return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +func (c *AIPClient) UpdateOneID(id int) *AIPUpdateOne { + mutation := newAIPMutation(c.config, OpUpdateOne, withAIPID(id)) + return &AIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// Delete returns a delete builder for Location. -func (c *LocationClient) Delete() *LocationDelete { - mutation := newLocationMutation(c.config, OpDelete) - return &LocationDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Delete returns a delete builder for AIP. +func (c *AIPClient) Delete() *AIPDelete { + mutation := newAIPMutation(c.config, OpDelete) + return &AIPDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } // DeleteOne returns a builder for deleting the given entity. -func (c *LocationClient) DeleteOne(l *Location) *LocationDeleteOne { - return c.DeleteOneID(l.ID) +func (c *AIPClient) DeleteOne(a *AIP) *AIPDeleteOne { + return c.DeleteOneID(a.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. -func (c *LocationClient) DeleteOneID(id int) *LocationDeleteOne { - builder := c.Delete().Where(location.ID(id)) +func (c *AIPClient) DeleteOneID(id int) *AIPDeleteOne { + builder := c.Delete().Where(aip.ID(id)) builder.mutation.id = &id builder.mutation.op = OpDeleteOne - return &LocationDeleteOne{builder} + return &AIPDeleteOne{builder} } -// Query returns a query builder for Location. -func (c *LocationClient) Query() *LocationQuery { - return &LocationQuery{ +// Query returns a query builder for AIP. +func (c *AIPClient) Query() *AIPQuery { + return &AIPQuery{ config: c.config, - ctx: &QueryContext{Type: TypeLocation}, + ctx: &QueryContext{Type: TypeAIP}, inters: c.Interceptors(), } } -// Get returns a Location entity by its id. -func (c *LocationClient) Get(ctx context.Context, id int) (*Location, error) { - return c.Query().Where(location.ID(id)).Only(ctx) +// Get returns a AIP entity by its id. +func (c *AIPClient) Get(ctx context.Context, id int) (*AIP, error) { + return c.Query().Where(aip.ID(id)).Only(ctx) } // GetX is like Get, but panics if an error occurs. -func (c *LocationClient) GetX(ctx context.Context, id int) *Location { +func (c *AIPClient) GetX(ctx context.Context, id int) *AIP { obj, err := c.Get(ctx, id) if err != nil { panic(err) @@ -315,148 +315,148 @@ func (c *LocationClient) GetX(ctx context.Context, id int) *Location { return obj } -// QueryPackages queries the packages edge of a Location. -func (c *LocationClient) QueryPackages(l *Location) *PkgQuery { - query := (&PkgClient{config: c.config}).Query() +// QueryLocation queries the location edge of a AIP. 
+func (c *AIPClient) QueryLocation(a *AIP) *LocationQuery { + query := (&LocationClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := l.ID + id := a.ID step := sqlgraph.NewStep( - sqlgraph.From(location.Table, location.FieldID, id), - sqlgraph.To(pkg.Table, pkg.FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, location.PackagesTable, location.PackagesColumn), + sqlgraph.From(aip.Table, aip.FieldID, id), + sqlgraph.To(location.Table, location.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, aip.LocationTable, aip.LocationColumn), ) - fromV = sqlgraph.Neighbors(l.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) return fromV, nil } return query } // Hooks returns the client hooks. -func (c *LocationClient) Hooks() []Hook { - return c.hooks.Location +func (c *AIPClient) Hooks() []Hook { + return c.hooks.AIP } // Interceptors returns the client interceptors. -func (c *LocationClient) Interceptors() []Interceptor { - return c.inters.Location +func (c *AIPClient) Interceptors() []Interceptor { + return c.inters.AIP } -func (c *LocationClient) mutate(ctx context.Context, m *LocationMutation) (Value, error) { +func (c *AIPClient) mutate(ctx context.Context, m *AIPMutation) (Value, error) { switch m.Op() { case OpCreate: - return (&LocationCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&AIPCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpUpdate: - return (&LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&AIPUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpUpdateOne: - return (&LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&AIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpDelete, OpDeleteOne: - return (&LocationDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + return (&AIPDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) default: - return nil, fmt.Errorf("db: unknown Location mutation op: %q", m.Op()) + return nil, fmt.Errorf("db: unknown AIP mutation op: %q", m.Op()) } } -// PkgClient is a client for the Pkg schema. -type PkgClient struct { +// LocationClient is a client for the Location schema. +type LocationClient struct { config } -// NewPkgClient returns a client for the Pkg from the given config. -func NewPkgClient(c config) *PkgClient { - return &PkgClient{config: c} +// NewLocationClient returns a client for the Location from the given config. +func NewLocationClient(c config) *LocationClient { + return &LocationClient{config: c} } // Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `pkg.Hooks(f(g(h())))`. -func (c *PkgClient) Use(hooks ...Hook) { - c.hooks.Pkg = append(c.hooks.Pkg, hooks...) +// A call to `Use(f, g, h)` equals to `location.Hooks(f(g(h())))`. +func (c *LocationClient) Use(hooks ...Hook) { + c.hooks.Location = append(c.hooks.Location, hooks...) } // Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `pkg.Intercept(f(g(h())))`. -func (c *PkgClient) Intercept(interceptors ...Interceptor) { - c.inters.Pkg = append(c.inters.Pkg, interceptors...) +// A call to `Intercept(f, g, h)` equals to `location.Intercept(f(g(h())))`. +func (c *LocationClient) Intercept(interceptors ...Interceptor) { + c.inters.Location = append(c.inters.Location, interceptors...) 
} -// Create returns a builder for creating a Pkg entity. -func (c *PkgClient) Create() *PkgCreate { - mutation := newPkgMutation(c.config, OpCreate) - return &PkgCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Create returns a builder for creating a Location entity. +func (c *LocationClient) Create() *LocationCreate { + mutation := newLocationMutation(c.config, OpCreate) + return &LocationCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// CreateBulk returns a builder for creating a bulk of Pkg entities. -func (c *PkgClient) CreateBulk(builders ...*PkgCreate) *PkgCreateBulk { - return &PkgCreateBulk{config: c.config, builders: builders} +// CreateBulk returns a builder for creating a bulk of Location entities. +func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreateBulk { + return &LocationCreateBulk{config: c.config, builders: builders} } // MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates // a builder and applies setFunc on it. -func (c *PkgClient) MapCreateBulk(slice any, setFunc func(*PkgCreate, int)) *PkgCreateBulk { +func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk { rv := reflect.ValueOf(slice) if rv.Kind() != reflect.Slice { - return &PkgCreateBulk{err: fmt.Errorf("calling to PkgClient.MapCreateBulk with wrong type %T, need slice", slice)} + return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)} } - builders := make([]*PkgCreate, rv.Len()) + builders := make([]*LocationCreate, rv.Len()) for i := 0; i < rv.Len(); i++ { builders[i] = c.Create() setFunc(builders[i], i) } - return &PkgCreateBulk{config: c.config, builders: builders} + return &LocationCreateBulk{config: c.config, builders: builders} } -// Update returns an update builder for Pkg. -func (c *PkgClient) Update() *PkgUpdate { - mutation := newPkgMutation(c.config, OpUpdate) - return &PkgUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Update returns an update builder for Location. +func (c *LocationClient) Update() *LocationUpdate { + mutation := newLocationMutation(c.config, OpUpdate) + return &LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} } // UpdateOne returns an update builder for the given entity. -func (c *PkgClient) UpdateOne(pk *Pkg) *PkgUpdateOne { - mutation := newPkgMutation(c.config, OpUpdateOne, withPkg(pk)) - return &PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +func (c *LocationClient) UpdateOne(l *Location) *LocationUpdateOne { + mutation := newLocationMutation(c.config, OpUpdateOne, withLocation(l)) + return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } // UpdateOneID returns an update builder for the given id. -func (c *PkgClient) UpdateOneID(id int) *PkgUpdateOne { - mutation := newPkgMutation(c.config, OpUpdateOne, withPkgID(id)) - return &PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +func (c *LocationClient) UpdateOneID(id int) *LocationUpdateOne { + mutation := newLocationMutation(c.config, OpUpdateOne, withLocationID(id)) + return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } -// Delete returns a delete builder for Pkg. 
-func (c *PkgClient) Delete() *PkgDelete { - mutation := newPkgMutation(c.config, OpDelete) - return &PkgDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +// Delete returns a delete builder for Location. +func (c *LocationClient) Delete() *LocationDelete { + mutation := newLocationMutation(c.config, OpDelete) + return &LocationDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} } // DeleteOne returns a builder for deleting the given entity. -func (c *PkgClient) DeleteOne(pk *Pkg) *PkgDeleteOne { - return c.DeleteOneID(pk.ID) +func (c *LocationClient) DeleteOne(l *Location) *LocationDeleteOne { + return c.DeleteOneID(l.ID) } // DeleteOneID returns a builder for deleting the given entity by its id. -func (c *PkgClient) DeleteOneID(id int) *PkgDeleteOne { - builder := c.Delete().Where(pkg.ID(id)) +func (c *LocationClient) DeleteOneID(id int) *LocationDeleteOne { + builder := c.Delete().Where(location.ID(id)) builder.mutation.id = &id builder.mutation.op = OpDeleteOne - return &PkgDeleteOne{builder} + return &LocationDeleteOne{builder} } -// Query returns a query builder for Pkg. -func (c *PkgClient) Query() *PkgQuery { - return &PkgQuery{ +// Query returns a query builder for Location. +func (c *LocationClient) Query() *LocationQuery { + return &LocationQuery{ config: c.config, - ctx: &QueryContext{Type: TypePkg}, + ctx: &QueryContext{Type: TypeLocation}, inters: c.Interceptors(), } } -// Get returns a Pkg entity by its id. -func (c *PkgClient) Get(ctx context.Context, id int) (*Pkg, error) { - return c.Query().Where(pkg.ID(id)).Only(ctx) +// Get returns a Location entity by its id. +func (c *LocationClient) Get(ctx context.Context, id int) (*Location, error) { + return c.Query().Where(location.ID(id)).Only(ctx) } // GetX is like Get, but panics if an error occurs. -func (c *PkgClient) GetX(ctx context.Context, id int) *Pkg { +func (c *LocationClient) GetX(ctx context.Context, id int) *Location { obj, err := c.Get(ctx, id) if err != nil { panic(err) @@ -464,53 +464,53 @@ func (c *PkgClient) GetX(ctx context.Context, id int) *Pkg { return obj } -// QueryLocation queries the location edge of a Pkg. -func (c *PkgClient) QueryLocation(pk *Pkg) *LocationQuery { - query := (&LocationClient{config: c.config}).Query() +// QueryAips queries the aips edge of a Location. +func (c *LocationClient) QueryAips(l *Location) *AIPQuery { + query := (&AIPClient{config: c.config}).Query() query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := pk.ID + id := l.ID step := sqlgraph.NewStep( - sqlgraph.From(pkg.Table, pkg.FieldID, id), - sqlgraph.To(location.Table, location.FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, pkg.LocationTable, pkg.LocationColumn), + sqlgraph.From(location.Table, location.FieldID, id), + sqlgraph.To(aip.Table, aip.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, location.AipsTable, location.AipsColumn), ) - fromV = sqlgraph.Neighbors(pk.driver.Dialect(), step) + fromV = sqlgraph.Neighbors(l.driver.Dialect(), step) return fromV, nil } return query } // Hooks returns the client hooks. -func (c *PkgClient) Hooks() []Hook { - return c.hooks.Pkg +func (c *LocationClient) Hooks() []Hook { + return c.hooks.Location } // Interceptors returns the client interceptors. 
-func (c *PkgClient) Interceptors() []Interceptor { - return c.inters.Pkg +func (c *LocationClient) Interceptors() []Interceptor { + return c.inters.Location } -func (c *PkgClient) mutate(ctx context.Context, m *PkgMutation) (Value, error) { +func (c *LocationClient) mutate(ctx context.Context, m *LocationMutation) (Value, error) { switch m.Op() { case OpCreate: - return (&PkgCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&LocationCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpUpdate: - return (&PkgUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpUpdateOne: - return (&PkgUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + return (&LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) case OpDelete, OpDeleteOne: - return (&PkgDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + return (&LocationDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) default: - return nil, fmt.Errorf("db: unknown Pkg mutation op: %q", m.Op()) + return nil, fmt.Errorf("db: unknown Location mutation op: %q", m.Op()) } } // hooks and interceptors per client, for fast access. type ( hooks struct { - Location, Pkg []ent.Hook + AIP, Location []ent.Hook } inters struct { - Location, Pkg []ent.Interceptor + AIP, Location []ent.Interceptor } ) diff --git a/internal/storage/persistence/ent/db/ent.go b/internal/storage/persistence/ent/db/ent.go index aae3304aa..dde4655f7 100644 --- a/internal/storage/persistence/ent/db/ent.go +++ b/internal/storage/persistence/ent/db/ent.go @@ -12,8 +12,8 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" ) // ent aliases to avoid import conflicts in user's code. @@ -74,8 +74,8 @@ var ( func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + aip.Table: aip.ValidColumn, location.Table: location.ValidColumn, - pkg.Table: pkg.ValidColumn, }) }) return columnCheck(table, column) diff --git a/internal/storage/persistence/ent/db/hook/hook.go b/internal/storage/persistence/ent/db/hook/hook.go index afe19caba..280096d5f 100644 --- a/internal/storage/persistence/ent/db/hook/hook.go +++ b/internal/storage/persistence/ent/db/hook/hook.go @@ -9,28 +9,28 @@ import ( "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db" ) -// The LocationFunc type is an adapter to allow the use of ordinary -// function as Location mutator. -type LocationFunc func(context.Context, *db.LocationMutation) (db.Value, error) +// The AIPFunc type is an adapter to allow the use of ordinary +// function as AIP mutator. +type AIPFunc func(context.Context, *db.AIPMutation) (db.Value, error) // Mutate calls f(ctx, m). -func (f LocationFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { - if mv, ok := m.(*db.LocationMutation); ok { +func (f AIPFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.AIPMutation); ok { return f(ctx, mv) } - return nil, fmt.Errorf("unexpected mutation type %T. 
expect *db.LocationMutation", m) + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.AIPMutation", m) } -// The PkgFunc type is an adapter to allow the use of ordinary -// function as Pkg mutator. -type PkgFunc func(context.Context, *db.PkgMutation) (db.Value, error) +// The LocationFunc type is an adapter to allow the use of ordinary +// function as Location mutator. +type LocationFunc func(context.Context, *db.LocationMutation) (db.Value, error) // Mutate calls f(ctx, m). -func (f PkgFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { - if mv, ok := m.(*db.PkgMutation); ok { +func (f LocationFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.LocationMutation); ok { return f(ctx, mv) } - return nil, fmt.Errorf("unexpected mutation type %T. expect *db.PkgMutation", m) + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.LocationMutation", m) } // Condition is a hook condition function. diff --git a/internal/storage/persistence/ent/db/location.go b/internal/storage/persistence/ent/db/location.go index fe30e435e..8241d821e 100644 --- a/internal/storage/persistence/ent/db/location.go +++ b/internal/storage/persistence/ent/db/location.go @@ -42,20 +42,20 @@ type Location struct { // LocationEdges holds the relations/edges for other nodes in the graph. type LocationEdges struct { - // Packages holds the value of the packages edge. - Packages []*Pkg `json:"packages,omitempty"` + // Aips holds the value of the aips edge. + Aips []*AIP `json:"aips,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool } -// PackagesOrErr returns the Packages value or an error if the edge +// AipsOrErr returns the Aips value or an error if the edge // was not loaded in eager-loading. -func (e LocationEdges) PackagesOrErr() ([]*Pkg, error) { +func (e LocationEdges) AipsOrErr() ([]*AIP, error) { if e.loadedTypes[0] { - return e.Packages, nil + return e.Aips, nil } - return nil, &NotLoadedError{edge: "packages"} + return nil, &NotLoadedError{edge: "aips"} } // scanValues returns the types for scanning values from sql.Rows. @@ -155,9 +155,9 @@ func (l *Location) Value(name string) (ent.Value, error) { return l.selectValues.Get(name) } -// QueryPackages queries the "packages" edge of the Location entity. -func (l *Location) QueryPackages() *PkgQuery { - return NewLocationClient(l.config).QueryPackages(l) +// QueryAips queries the "aips" edge of the Location entity. +func (l *Location) QueryAips() *AIPQuery { + return NewLocationClient(l.config).QueryAips(l) } // Update returns a builder for updating this Location. diff --git a/internal/storage/persistence/ent/db/location/location.go b/internal/storage/persistence/ent/db/location/location.go index d7faa2145..1e6e082d0 100644 --- a/internal/storage/persistence/ent/db/location/location.go +++ b/internal/storage/persistence/ent/db/location/location.go @@ -30,17 +30,17 @@ const ( FieldConfig = "config" // FieldCreatedAt holds the string denoting the created_at field in the database. FieldCreatedAt = "created_at" - // EdgePackages holds the string denoting the packages edge name in mutations. - EdgePackages = "packages" + // EdgeAips holds the string denoting the aips edge name in mutations. + EdgeAips = "aips" // Table holds the table name of the location in the database. Table = "location" - // PackagesTable is the table that holds the packages relation/edge. 
- PackagesTable = "package" - // PackagesInverseTable is the table name for the Pkg entity. - // It exists in this package in order to avoid circular dependency with the "pkg" package. - PackagesInverseTable = "package" - // PackagesColumn is the table column denoting the packages relation/edge. - PackagesColumn = "location_id" + // AipsTable is the table that holds the aips relation/edge. + AipsTable = "aip" + // AipsInverseTable is the table name for the AIP entity. + // It exists in this package in order to avoid circular dependency with the "aip" package. + AipsInverseTable = "aip" + // AipsColumn is the table column denoting the aips relation/edge. + AipsColumn = "location_id" ) // Columns holds all SQL columns for location fields. @@ -128,23 +128,23 @@ func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() } -// ByPackagesCount orders the results by packages count. -func ByPackagesCount(opts ...sql.OrderTermOption) OrderOption { +// ByAipsCount orders the results by aips count. +func ByAipsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { - sqlgraph.OrderByNeighborsCount(s, newPackagesStep(), opts...) + sqlgraph.OrderByNeighborsCount(s, newAipsStep(), opts...) } } -// ByPackages orders the results by packages terms. -func ByPackages(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { +// ByAips orders the results by aips terms. +func ByAips(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { return func(s *sql.Selector) { - sqlgraph.OrderByNeighborTerms(s, newPackagesStep(), append([]sql.OrderTerm{term}, terms...)...) + sqlgraph.OrderByNeighborTerms(s, newAipsStep(), append([]sql.OrderTerm{term}, terms...)...) } } -func newPackagesStep() *sqlgraph.Step { +func newAipsStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(PackagesInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, PackagesTable, PackagesColumn), + sqlgraph.To(AipsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AipsTable, AipsColumn), ) } diff --git a/internal/storage/persistence/ent/db/location/where.go b/internal/storage/persistence/ent/db/location/where.go index 72720aff5..a03fa4706 100644 --- a/internal/storage/persistence/ent/db/location/where.go +++ b/internal/storage/persistence/ent/db/location/where.go @@ -327,21 +327,21 @@ func CreatedAtLTE(v time.Time) predicate.Location { return predicate.Location(sql.FieldLTE(FieldCreatedAt, v)) } -// HasPackages applies the HasEdge predicate on the "packages" edge. -func HasPackages() predicate.Location { +// HasAips applies the HasEdge predicate on the "aips" edge. +func HasAips() predicate.Location { return predicate.Location(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, PackagesTable, PackagesColumn), + sqlgraph.Edge(sqlgraph.O2M, true, AipsTable, AipsColumn), ) sqlgraph.HasNeighbors(s, step) }) } -// HasPackagesWith applies the HasEdge predicate on the "packages" edge with a given conditions (other predicates). -func HasPackagesWith(preds ...predicate.Pkg) predicate.Location { +// HasAipsWith applies the HasEdge predicate on the "aips" edge with a given conditions (other predicates). 
+func HasAipsWith(preds ...predicate.AIP) predicate.Location { return predicate.Location(func(s *sql.Selector) { - step := newPackagesStep() + step := newAipsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) diff --git a/internal/storage/persistence/ent/db/location_create.go b/internal/storage/persistence/ent/db/location_create.go index 439b6f92f..c7e12ffb1 100644 --- a/internal/storage/persistence/ent/db/location_create.go +++ b/internal/storage/persistence/ent/db/location_create.go @@ -10,8 +10,8 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/types" "github.com/google/uuid" ) @@ -73,19 +73,19 @@ func (lc *LocationCreate) SetNillableCreatedAt(t *time.Time) *LocationCreate { return lc } -// AddPackageIDs adds the "packages" edge to the Pkg entity by IDs. -func (lc *LocationCreate) AddPackageIDs(ids ...int) *LocationCreate { - lc.mutation.AddPackageIDs(ids...) +// AddAipIDs adds the "aips" edge to the AIP entity by IDs. +func (lc *LocationCreate) AddAipIDs(ids ...int) *LocationCreate { + lc.mutation.AddAipIDs(ids...) return lc } -// AddPackages adds the "packages" edges to the Pkg entity. -func (lc *LocationCreate) AddPackages(p ...*Pkg) *LocationCreate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID +// AddAips adds the "aips" edges to the AIP entity. +func (lc *LocationCreate) AddAips(a ...*AIP) *LocationCreate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID } - return lc.AddPackageIDs(ids...) + return lc.AddAipIDs(ids...) } // Mutation returns the LocationMutation object of the builder. @@ -216,15 +216,15 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { _spec.SetField(location.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value } - if nodes := lc.mutation.PackagesIDs(); len(nodes) > 0 { + if nodes := lc.mutation.AipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } for _, k := range nodes { diff --git a/internal/storage/persistence/ent/db/location_query.go b/internal/storage/persistence/ent/db/location_query.go index f62c31c62..0147d2fd1 100644 --- a/internal/storage/persistence/ent/db/location_query.go +++ b/internal/storage/persistence/ent/db/location_query.go @@ -12,19 +12,19 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" ) // LocationQuery is the builder for querying Location entities. 
type LocationQuery struct { config - ctx *QueryContext - order []location.OrderOption - inters []Interceptor - predicates []predicate.Location - withPackages *PkgQuery + ctx *QueryContext + order []location.OrderOption + inters []Interceptor + predicates []predicate.Location + withAips *AIPQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -61,9 +61,9 @@ func (lq *LocationQuery) Order(o ...location.OrderOption) *LocationQuery { return lq } -// QueryPackages chains the current query on the "packages" edge. -func (lq *LocationQuery) QueryPackages() *PkgQuery { - query := (&PkgClient{config: lq.config}).Query() +// QueryAips chains the current query on the "aips" edge. +func (lq *LocationQuery) QueryAips() *AIPQuery { + query := (&AIPClient{config: lq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := lq.prepareQuery(ctx); err != nil { return nil, err @@ -74,8 +74,8 @@ func (lq *LocationQuery) QueryPackages() *PkgQuery { } step := sqlgraph.NewStep( sqlgraph.From(location.Table, location.FieldID, selector), - sqlgraph.To(pkg.Table, pkg.FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, location.PackagesTable, location.PackagesColumn), + sqlgraph.To(aip.Table, aip.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, location.AipsTable, location.AipsColumn), ) fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step) return fromU, nil @@ -270,26 +270,26 @@ func (lq *LocationQuery) Clone() *LocationQuery { return nil } return &LocationQuery{ - config: lq.config, - ctx: lq.ctx.Clone(), - order: append([]location.OrderOption{}, lq.order...), - inters: append([]Interceptor{}, lq.inters...), - predicates: append([]predicate.Location{}, lq.predicates...), - withPackages: lq.withPackages.Clone(), + config: lq.config, + ctx: lq.ctx.Clone(), + order: append([]location.OrderOption{}, lq.order...), + inters: append([]Interceptor{}, lq.inters...), + predicates: append([]predicate.Location{}, lq.predicates...), + withAips: lq.withAips.Clone(), // clone intermediate query. sql: lq.sql.Clone(), path: lq.path, } } -// WithPackages tells the query-builder to eager-load the nodes that are connected to -// the "packages" edge. The optional arguments are used to configure the query builder of the edge. -func (lq *LocationQuery) WithPackages(opts ...func(*PkgQuery)) *LocationQuery { - query := (&PkgClient{config: lq.config}).Query() +// WithAips tells the query-builder to eager-load the nodes that are connected to +// the "aips" edge. The optional arguments are used to configure the query builder of the edge. 
+func (lq *LocationQuery) WithAips(opts ...func(*AIPQuery)) *LocationQuery { + query := (&AIPClient{config: lq.config}).Query() for _, opt := range opts { opt(query) } - lq.withPackages = query + lq.withAips = query return lq } @@ -372,7 +372,7 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc nodes = []*Location{} _spec = lq.querySpec() loadedTypes = [1]bool{ - lq.withPackages != nil, + lq.withAips != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { @@ -393,17 +393,17 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc if len(nodes) == 0 { return nodes, nil } - if query := lq.withPackages; query != nil { - if err := lq.loadPackages(ctx, query, nodes, - func(n *Location) { n.Edges.Packages = []*Pkg{} }, - func(n *Location, e *Pkg) { n.Edges.Packages = append(n.Edges.Packages, e) }); err != nil { + if query := lq.withAips; query != nil { + if err := lq.loadAips(ctx, query, nodes, + func(n *Location) { n.Edges.Aips = []*AIP{} }, + func(n *Location, e *AIP) { n.Edges.Aips = append(n.Edges.Aips, e) }); err != nil { return nil, err } } return nodes, nil } -func (lq *LocationQuery) loadPackages(ctx context.Context, query *PkgQuery, nodes []*Location, init func(*Location), assign func(*Location, *Pkg)) error { +func (lq *LocationQuery) loadAips(ctx context.Context, query *AIPQuery, nodes []*Location, init func(*Location), assign func(*Location, *AIP)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[int]*Location) for i := range nodes { @@ -414,10 +414,10 @@ func (lq *LocationQuery) loadPackages(ctx context.Context, query *PkgQuery, node } } if len(query.ctx.Fields) > 0 { - query.ctx.AppendFieldOnce(pkg.FieldLocationID) + query.ctx.AppendFieldOnce(aip.FieldLocationID) } - query.Where(predicate.Pkg(func(s *sql.Selector) { - s.Where(sql.InValues(s.C(location.PackagesColumn), fks...)) + query.Where(predicate.AIP(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(location.AipsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { diff --git a/internal/storage/persistence/ent/db/location_update.go b/internal/storage/persistence/ent/db/location_update.go index 263d6cbfd..2b8d89901 100644 --- a/internal/storage/persistence/ent/db/location_update.go +++ b/internal/storage/persistence/ent/db/location_update.go @@ -10,8 +10,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" "github.com/artefactual-sdps/enduro/internal/storage/types" "github.com/google/uuid" @@ -114,19 +114,19 @@ func (lu *LocationUpdate) SetNillableConfig(tc *types.LocationConfig) *LocationU return lu } -// AddPackageIDs adds the "packages" edge to the Pkg entity by IDs. -func (lu *LocationUpdate) AddPackageIDs(ids ...int) *LocationUpdate { - lu.mutation.AddPackageIDs(ids...) +// AddAipIDs adds the "aips" edge to the AIP entity by IDs. +func (lu *LocationUpdate) AddAipIDs(ids ...int) *LocationUpdate { + lu.mutation.AddAipIDs(ids...) return lu } -// AddPackages adds the "packages" edges to the Pkg entity. 
-func (lu *LocationUpdate) AddPackages(p ...*Pkg) *LocationUpdate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID +// AddAips adds the "aips" edges to the AIP entity. +func (lu *LocationUpdate) AddAips(a ...*AIP) *LocationUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID } - return lu.AddPackageIDs(ids...) + return lu.AddAipIDs(ids...) } // Mutation returns the LocationMutation object of the builder. @@ -134,25 +134,25 @@ func (lu *LocationUpdate) Mutation() *LocationMutation { return lu.mutation } -// ClearPackages clears all "packages" edges to the Pkg entity. -func (lu *LocationUpdate) ClearPackages() *LocationUpdate { - lu.mutation.ClearPackages() +// ClearAips clears all "aips" edges to the AIP entity. +func (lu *LocationUpdate) ClearAips() *LocationUpdate { + lu.mutation.ClearAips() return lu } -// RemovePackageIDs removes the "packages" edge to Pkg entities by IDs. -func (lu *LocationUpdate) RemovePackageIDs(ids ...int) *LocationUpdate { - lu.mutation.RemovePackageIDs(ids...) +// RemoveAipIDs removes the "aips" edge to AIP entities by IDs. +func (lu *LocationUpdate) RemoveAipIDs(ids ...int) *LocationUpdate { + lu.mutation.RemoveAipIDs(ids...) return lu } -// RemovePackages removes "packages" edges to Pkg entities. -func (lu *LocationUpdate) RemovePackages(p ...*Pkg) *LocationUpdate { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID +// RemoveAips removes "aips" edges to AIP entities. +func (lu *LocationUpdate) RemoveAips(a ...*AIP) *LocationUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID } - return lu.RemovePackageIDs(ids...) + return lu.RemoveAipIDs(ids...) } // Save executes the query and returns the number of nodes affected by the update operation. 
@@ -227,28 +227,28 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := lu.mutation.Config(); ok { _spec.SetField(location.FieldConfig, field.TypeJSON, value) } - if lu.mutation.PackagesCleared() { + if lu.mutation.AipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := lu.mutation.RemovedPackagesIDs(); len(nodes) > 0 && !lu.mutation.PackagesCleared() { + if nodes := lu.mutation.RemovedAipsIDs(); len(nodes) > 0 && !lu.mutation.AipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -256,15 +256,15 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := lu.mutation.PackagesIDs(); len(nodes) > 0 { + if nodes := lu.mutation.AipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -376,19 +376,19 @@ func (luo *LocationUpdateOne) SetNillableConfig(tc *types.LocationConfig) *Locat return luo } -// AddPackageIDs adds the "packages" edge to the Pkg entity by IDs. -func (luo *LocationUpdateOne) AddPackageIDs(ids ...int) *LocationUpdateOne { - luo.mutation.AddPackageIDs(ids...) +// AddAipIDs adds the "aips" edge to the AIP entity by IDs. +func (luo *LocationUpdateOne) AddAipIDs(ids ...int) *LocationUpdateOne { + luo.mutation.AddAipIDs(ids...) return luo } -// AddPackages adds the "packages" edges to the Pkg entity. -func (luo *LocationUpdateOne) AddPackages(p ...*Pkg) *LocationUpdateOne { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID +// AddAips adds the "aips" edges to the AIP entity. +func (luo *LocationUpdateOne) AddAips(a ...*AIP) *LocationUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID } - return luo.AddPackageIDs(ids...) + return luo.AddAipIDs(ids...) } // Mutation returns the LocationMutation object of the builder. @@ -396,25 +396,25 @@ func (luo *LocationUpdateOne) Mutation() *LocationMutation { return luo.mutation } -// ClearPackages clears all "packages" edges to the Pkg entity. -func (luo *LocationUpdateOne) ClearPackages() *LocationUpdateOne { - luo.mutation.ClearPackages() +// ClearAips clears all "aips" edges to the AIP entity. +func (luo *LocationUpdateOne) ClearAips() *LocationUpdateOne { + luo.mutation.ClearAips() return luo } -// RemovePackageIDs removes the "packages" edge to Pkg entities by IDs. 
-func (luo *LocationUpdateOne) RemovePackageIDs(ids ...int) *LocationUpdateOne { - luo.mutation.RemovePackageIDs(ids...) +// RemoveAipIDs removes the "aips" edge to AIP entities by IDs. +func (luo *LocationUpdateOne) RemoveAipIDs(ids ...int) *LocationUpdateOne { + luo.mutation.RemoveAipIDs(ids...) return luo } -// RemovePackages removes "packages" edges to Pkg entities. -func (luo *LocationUpdateOne) RemovePackages(p ...*Pkg) *LocationUpdateOne { - ids := make([]int, len(p)) - for i := range p { - ids[i] = p[i].ID +// RemoveAips removes "aips" edges to AIP entities. +func (luo *LocationUpdateOne) RemoveAips(a ...*AIP) *LocationUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID } - return luo.RemovePackageIDs(ids...) + return luo.RemoveAipIDs(ids...) } // Where appends a list predicates to the LocationUpdate builder. @@ -519,28 +519,28 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err if value, ok := luo.mutation.Config(); ok { _spec.SetField(location.FieldConfig, field.TypeJSON, value) } - if luo.mutation.PackagesCleared() { + if luo.mutation.AipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := luo.mutation.RemovedPackagesIDs(); len(nodes) > 0 && !luo.mutation.PackagesCleared() { + if nodes := luo.mutation.RemovedAipsIDs(); len(nodes) > 0 && !luo.mutation.AipsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -548,15 +548,15 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) } - if nodes := luo.mutation.PackagesIDs(); len(nodes) > 0 { + if nodes := luo.mutation.AipsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, Inverse: true, - Table: location.PackagesTable, - Columns: []string{location.PackagesColumn}, + Table: location.AipsTable, + Columns: []string{location.AipsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt), + IDSpec: sqlgraph.NewFieldSpec(aip.FieldID, field.TypeInt), }, } for _, k := range nodes { diff --git a/internal/storage/persistence/ent/db/migrate/schema.go b/internal/storage/persistence/ent/db/migrate/schema.go index 7c0ba4243..42c191244 100644 --- a/internal/storage/persistence/ent/db/migrate/schema.go +++ b/internal/storage/persistence/ent/db/migrate/schema.go @@ -9,6 +9,42 @@ import ( ) var ( + // AipColumns holds the columns for the "aip" table. 
+ AipColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Size: 2048}, + {Name: "aip_id", Type: field.TypeUUID, Unique: true}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"unspecified", "in_review", "rejected", "stored", "moving"}}, + {Name: "object_key", Type: field.TypeUUID, Unique: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "location_id", Type: field.TypeInt, Nullable: true}, + } + // AipTable holds the schema information for the "aip" table. + AipTable = &schema.Table{ + Name: "aip", + Columns: AipColumns, + PrimaryKey: []*schema.Column{AipColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "aip_location_location", + Columns: []*schema.Column{AipColumns[6]}, + RefColumns: []*schema.Column{LocationColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "aip_aip_id", + Unique: false, + Columns: []*schema.Column{AipColumns[2]}, + }, + { + Name: "aip_object_key", + Unique: false, + Columns: []*schema.Column{AipColumns[4]}, + }, + }, + } // LocationColumns holds the columns for the "location" table. LocationColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -41,55 +77,19 @@ var ( }, }, } - // PackageColumns holds the columns for the "package" table. - PackageColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "name", Type: field.TypeString, Size: 2048}, - {Name: "aip_id", Type: field.TypeUUID, Unique: true}, - {Name: "status", Type: field.TypeEnum, Enums: []string{"unspecified", "in_review", "rejected", "stored", "moving"}}, - {Name: "object_key", Type: field.TypeUUID, Unique: true}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "location_id", Type: field.TypeInt, Nullable: true}, - } - // PackageTable holds the schema information for the "package" table. - PackageTable = &schema.Table{ - Name: "package", - Columns: PackageColumns, - PrimaryKey: []*schema.Column{PackageColumns[0]}, - ForeignKeys: []*schema.ForeignKey{ - { - Symbol: "package_location_location", - Columns: []*schema.Column{PackageColumns[6]}, - RefColumns: []*schema.Column{LocationColumns[0]}, - OnDelete: schema.SetNull, - }, - }, - Indexes: []*schema.Index{ - { - Name: "pkg_aip_id", - Unique: false, - Columns: []*schema.Column{PackageColumns[2]}, - }, - { - Name: "pkg_object_key", - Unique: false, - Columns: []*schema.Column{PackageColumns[4]}, - }, - }, - } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ + AipTable, LocationTable, - PackageTable, } ) func init() { + AipTable.ForeignKeys[0].RefTable = LocationTable + AipTable.Annotation = &entsql.Annotation{ + Table: "aip", + } LocationTable.Annotation = &entsql.Annotation{ Table: "location", } - PackageTable.ForeignKeys[0].RefTable = LocationTable - PackageTable.Annotation = &entsql.Annotation{ - Table: "package", - } } diff --git a/internal/storage/persistence/ent/db/mutation.go b/internal/storage/persistence/ent/db/mutation.go index 650ea63e9..acc0499a2 100644 --- a/internal/storage/persistence/ent/db/mutation.go +++ b/internal/storage/persistence/ent/db/mutation.go @@ -11,8 +11,8 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" "github.com/artefactual-sdps/enduro/internal/storage/types" "github.com/google/uuid" @@ -27,43 +27,40 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. + TypeAIP = "AIP" TypeLocation = "Location" - TypePkg = "Pkg" ) -// LocationMutation represents an operation that mutates the Location nodes in the graph. -type LocationMutation struct { +// AIPMutation represents an operation that mutates the AIP nodes in the graph. +type AIPMutation struct { config op Op typ string id *int name *string - description *string - source *types.LocationSource - purpose *types.LocationPurpose - uuid *uuid.UUID - _config *types.LocationConfig + aip_id *uuid.UUID + status *types.AIPStatus + object_key *uuid.UUID created_at *time.Time clearedFields map[string]struct{} - packages map[int]struct{} - removedpackages map[int]struct{} - clearedpackages bool + location *int + clearedlocation bool done bool - oldValue func(context.Context) (*Location, error) - predicates []predicate.Location + oldValue func(context.Context) (*AIP, error) + predicates []predicate.AIP } -var _ ent.Mutation = (*LocationMutation)(nil) +var _ ent.Mutation = (*AIPMutation)(nil) -// locationOption allows management of the mutation configuration using functional options. -type locationOption func(*LocationMutation) +// aipOption allows management of the mutation configuration using functional options. +type aipOption func(*AIPMutation) -// newLocationMutation creates new mutation for the Location entity. -func newLocationMutation(c config, op Op, opts ...locationOption) *LocationMutation { - m := &LocationMutation{ +// newAIPMutation creates new mutation for the AIP entity. +func newAIPMutation(c config, op Op, opts ...aipOption) *AIPMutation { + m := &AIPMutation{ config: c, op: op, - typ: TypeLocation, + typ: TypeAIP, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -72,20 +69,20 @@ func newLocationMutation(c config, op Op, opts ...locationOption) *LocationMutat return m } -// withLocationID sets the ID field of the mutation. -func withLocationID(id int) locationOption { - return func(m *LocationMutation) { +// withAIPID sets the ID field of the mutation. 
+func withAIPID(id int) aipOption { + return func(m *AIPMutation) { var ( err error once sync.Once - value *Location + value *AIP ) - m.oldValue = func(ctx context.Context) (*Location, error) { + m.oldValue = func(ctx context.Context) (*AIP, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().Location.Get(ctx, id) + value, err = m.Client().AIP.Get(ctx, id) } }) return value, err @@ -94,10 +91,10 @@ func withLocationID(id int) locationOption { } } -// withLocation sets the old Location of the mutation. -func withLocation(node *Location) locationOption { - return func(m *LocationMutation) { - m.oldValue = func(context.Context) (*Location, error) { +// withAIP sets the old AIP of the mutation. +func withAIP(node *AIP) aipOption { + return func(m *AIPMutation) { + m.oldValue = func(context.Context) (*AIP, error) { return node, nil } m.id = &node.ID @@ -106,7 +103,7 @@ func withLocation(node *Location) locationOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m LocationMutation) Client() *Client { +func (m AIPMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -114,7 +111,7 @@ func (m LocationMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m LocationMutation) Tx() (*Tx, error) { +func (m AIPMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -125,7 +122,7 @@ func (m LocationMutation) Tx() (*Tx, error) { // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *LocationMutation) ID() (id int, exists bool) { +func (m *AIPMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -136,7 +133,7 @@ func (m *LocationMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *LocationMutation) IDs(ctx context.Context) ([]int, error) { +func (m *AIPMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -145,19 +142,19 @@ func (m *LocationMutation) IDs(ctx context.Context) ([]int, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().Location.Query().Where(m.predicates...).IDs(ctx) + return m.Client().AIP.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } // SetName sets the "name" field. -func (m *LocationMutation) SetName(s string) { +func (m *AIPMutation) SetName(s string) { m.name = &s } // Name returns the value of the "name" field in the mutation. -func (m *LocationMutation) Name() (r string, exists bool) { +func (m *AIPMutation) Name() (r string, exists bool) { v := m.name if v == nil { return @@ -165,10 +162,10 @@ func (m *LocationMutation) Name() (r string, exists bool) { return *v, true } -// OldName returns the old "name" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. 
+// OldName returns the old "name" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldName(ctx context.Context) (v string, err error) { +func (m *AIPMutation) OldName(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldName is only allowed on UpdateOne operations") } @@ -183,197 +180,174 @@ func (m *LocationMutation) OldName(ctx context.Context) (v string, err error) { } // ResetName resets all changes to the "name" field. -func (m *LocationMutation) ResetName() { +func (m *AIPMutation) ResetName() { m.name = nil } -// SetDescription sets the "description" field. -func (m *LocationMutation) SetDescription(s string) { - m.description = &s +// SetAipID sets the "aip_id" field. +func (m *AIPMutation) SetAipID(u uuid.UUID) { + m.aip_id = &u } -// Description returns the value of the "description" field in the mutation. -func (m *LocationMutation) Description() (r string, exists bool) { - v := m.description +// AipID returns the value of the "aip_id" field in the mutation. +func (m *AIPMutation) AipID() (r uuid.UUID, exists bool) { + v := m.aip_id if v == nil { return } return *v, true } -// OldDescription returns the old "description" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. +// OldAipID returns the old "aip_id" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldDescription(ctx context.Context) (v string, err error) { +func (m *AIPMutation) OldAipID(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldDescription is only allowed on UpdateOne operations") + return v, errors.New("OldAipID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldDescription requires an ID field in the mutation") + return v, errors.New("OldAipID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldDescription: %w", err) + return v, fmt.Errorf("querying old value for OldAipID: %w", err) } - return oldValue.Description, nil + return oldValue.AipID, nil } -// ResetDescription resets all changes to the "description" field. -func (m *LocationMutation) ResetDescription() { - m.description = nil +// ResetAipID resets all changes to the "aip_id" field. +func (m *AIPMutation) ResetAipID() { + m.aip_id = nil } -// SetSource sets the "source" field. -func (m *LocationMutation) SetSource(ts types.LocationSource) { - m.source = &ts +// SetLocationID sets the "location_id" field. +func (m *AIPMutation) SetLocationID(i int) { + m.location = &i } -// Source returns the value of the "source" field in the mutation. -func (m *LocationMutation) Source() (r types.LocationSource, exists bool) { - v := m.source +// LocationID returns the value of the "location_id" field in the mutation. +func (m *AIPMutation) LocationID() (r int, exists bool) { + v := m.location if v == nil { return } return *v, true } -// OldSource returns the old "source" field's value of the Location entity. 
-// If the Location object wasn't provided to the builder, the object is fetched from the database. +// OldLocationID returns the old "location_id" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldSource(ctx context.Context) (v types.LocationSource, err error) { +func (m *AIPMutation) OldLocationID(ctx context.Context) (v int, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSource is only allowed on UpdateOne operations") + return v, errors.New("OldLocationID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSource requires an ID field in the mutation") + return v, errors.New("OldLocationID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldSource: %w", err) + return v, fmt.Errorf("querying old value for OldLocationID: %w", err) } - return oldValue.Source, nil -} - -// ResetSource resets all changes to the "source" field. -func (m *LocationMutation) ResetSource() { - m.source = nil -} - -// SetPurpose sets the "purpose" field. -func (m *LocationMutation) SetPurpose(tp types.LocationPurpose) { - m.purpose = &tp + return oldValue.LocationID, nil } -// Purpose returns the value of the "purpose" field in the mutation. -func (m *LocationMutation) Purpose() (r types.LocationPurpose, exists bool) { - v := m.purpose - if v == nil { - return - } - return *v, true +// ClearLocationID clears the value of the "location_id" field. +func (m *AIPMutation) ClearLocationID() { + m.location = nil + m.clearedFields[aip.FieldLocationID] = struct{}{} } -// OldPurpose returns the old "purpose" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldPurpose(ctx context.Context) (v types.LocationPurpose, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPurpose is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPurpose requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPurpose: %w", err) - } - return oldValue.Purpose, nil +// LocationIDCleared returns if the "location_id" field was cleared in this mutation. +func (m *AIPMutation) LocationIDCleared() bool { + _, ok := m.clearedFields[aip.FieldLocationID] + return ok } -// ResetPurpose resets all changes to the "purpose" field. -func (m *LocationMutation) ResetPurpose() { - m.purpose = nil +// ResetLocationID resets all changes to the "location_id" field. +func (m *AIPMutation) ResetLocationID() { + m.location = nil + delete(m.clearedFields, aip.FieldLocationID) } -// SetUUID sets the "uuid" field. -func (m *LocationMutation) SetUUID(u uuid.UUID) { - m.uuid = &u +// SetStatus sets the "status" field. +func (m *AIPMutation) SetStatus(ts types.AIPStatus) { + m.status = &ts } -// UUID returns the value of the "uuid" field in the mutation. -func (m *LocationMutation) UUID() (r uuid.UUID, exists bool) { - v := m.uuid +// Status returns the value of the "status" field in the mutation. 
+func (m *AIPMutation) Status() (r types.AIPStatus, exists bool) { + v := m.status if v == nil { return } return *v, true } -// OldUUID returns the old "uuid" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. +// OldStatus returns the old "status" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldUUID(ctx context.Context) (v uuid.UUID, err error) { +func (m *AIPMutation) OldStatus(ctx context.Context) (v types.AIPStatus, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUUID is only allowed on UpdateOne operations") + return v, errors.New("OldStatus is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUUID requires an ID field in the mutation") + return v, errors.New("OldStatus requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldUUID: %w", err) + return v, fmt.Errorf("querying old value for OldStatus: %w", err) } - return oldValue.UUID, nil + return oldValue.Status, nil } -// ResetUUID resets all changes to the "uuid" field. -func (m *LocationMutation) ResetUUID() { - m.uuid = nil +// ResetStatus resets all changes to the "status" field. +func (m *AIPMutation) ResetStatus() { + m.status = nil } -// SetConfig sets the "config" field. -func (m *LocationMutation) SetConfig(tc types.LocationConfig) { - m._config = &tc +// SetObjectKey sets the "object_key" field. +func (m *AIPMutation) SetObjectKey(u uuid.UUID) { + m.object_key = &u } -// Config returns the value of the "config" field in the mutation. -func (m *LocationMutation) Config() (r types.LocationConfig, exists bool) { - v := m._config +// ObjectKey returns the value of the "object_key" field in the mutation. +func (m *AIPMutation) ObjectKey() (r uuid.UUID, exists bool) { + v := m.object_key if v == nil { return } return *v, true } -// OldConfig returns the old "config" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. +// OldObjectKey returns the old "object_key" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldConfig(ctx context.Context) (v types.LocationConfig, err error) { +func (m *AIPMutation) OldObjectKey(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldConfig is only allowed on UpdateOne operations") + return v, errors.New("OldObjectKey is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldConfig requires an ID field in the mutation") + return v, errors.New("OldObjectKey requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldConfig: %w", err) + return v, fmt.Errorf("querying old value for OldObjectKey: %w", err) } - return oldValue.Config, nil + return oldValue.ObjectKey, nil } -// ResetConfig resets all changes to the "config" field. 
-func (m *LocationMutation) ResetConfig() { - m._config = nil +// ResetObjectKey resets all changes to the "object_key" field. +func (m *AIPMutation) ResetObjectKey() { + m.object_key = nil } // SetCreatedAt sets the "created_at" field. -func (m *LocationMutation) SetCreatedAt(t time.Time) { +func (m *AIPMutation) SetCreatedAt(t time.Time) { m.created_at = &t } // CreatedAt returns the value of the "created_at" field in the mutation. -func (m *LocationMutation) CreatedAt() (r time.Time, exists bool) { +func (m *AIPMutation) CreatedAt() (r time.Time, exists bool) { v := m.created_at if v == nil { return @@ -381,10 +355,10 @@ func (m *LocationMutation) CreatedAt() (r time.Time, exists bool) { return *v, true } -// OldCreatedAt returns the old "created_at" field's value of the Location entity. -// If the Location object wasn't provided to the builder, the object is fetched from the database. +// OldCreatedAt returns the old "created_at" field's value of the AIP entity. +// If the AIP object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *LocationMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { +func (m *AIPMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -399,73 +373,46 @@ func (m *LocationMutation) OldCreatedAt(ctx context.Context) (v time.Time, err e } // ResetCreatedAt resets all changes to the "created_at" field. -func (m *LocationMutation) ResetCreatedAt() { +func (m *AIPMutation) ResetCreatedAt() { m.created_at = nil } -// AddPackageIDs adds the "packages" edge to the Pkg entity by ids. -func (m *LocationMutation) AddPackageIDs(ids ...int) { - if m.packages == nil { - m.packages = make(map[int]struct{}) - } - for i := range ids { - m.packages[ids[i]] = struct{}{} - } -} - -// ClearPackages clears the "packages" edge to the Pkg entity. -func (m *LocationMutation) ClearPackages() { - m.clearedpackages = true -} - -// PackagesCleared reports if the "packages" edge to the Pkg entity was cleared. -func (m *LocationMutation) PackagesCleared() bool { - return m.clearedpackages -} - -// RemovePackageIDs removes the "packages" edge to the Pkg entity by IDs. -func (m *LocationMutation) RemovePackageIDs(ids ...int) { - if m.removedpackages == nil { - m.removedpackages = make(map[int]struct{}) - } - for i := range ids { - delete(m.packages, ids[i]) - m.removedpackages[ids[i]] = struct{}{} - } +// ClearLocation clears the "location" edge to the Location entity. +func (m *AIPMutation) ClearLocation() { + m.clearedlocation = true + m.clearedFields[aip.FieldLocationID] = struct{}{} } -// RemovedPackages returns the removed IDs of the "packages" edge to the Pkg entity. -func (m *LocationMutation) RemovedPackagesIDs() (ids []int) { - for id := range m.removedpackages { - ids = append(ids, id) - } - return +// LocationCleared reports if the "location" edge to the Location entity was cleared. +func (m *AIPMutation) LocationCleared() bool { + return m.LocationIDCleared() || m.clearedlocation } -// PackagesIDs returns the "packages" edge IDs in the mutation. -func (m *LocationMutation) PackagesIDs() (ids []int) { - for id := range m.packages { - ids = append(ids, id) +// LocationIDs returns the "location" edge IDs in the mutation. 
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// LocationID instead. It exists only for internal usage by the builders. +func (m *AIPMutation) LocationIDs() (ids []int) { + if id := m.location; id != nil { + ids = append(ids, *id) } return } -// ResetPackages resets all changes to the "packages" edge. -func (m *LocationMutation) ResetPackages() { - m.packages = nil - m.clearedpackages = false - m.removedpackages = nil +// ResetLocation resets all changes to the "location" edge. +func (m *AIPMutation) ResetLocation() { + m.location = nil + m.clearedlocation = false } -// Where appends a list predicates to the LocationMutation builder. -func (m *LocationMutation) Where(ps ...predicate.Location) { +// Where appends a list predicates to the AIPMutation builder. +func (m *AIPMutation) Where(ps ...predicate.AIP) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the LocationMutation builder. Using this method, +// WhereP appends storage-level predicates to the AIPMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *LocationMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Location, len(ps)) +func (m *AIPMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AIP, len(ps)) for i := range ps { p[i] = ps[i] } @@ -473,45 +420,42 @@ func (m *LocationMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *LocationMutation) Op() Op { +func (m *AIPMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *LocationMutation) SetOp(op Op) { +func (m *AIPMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Location). -func (m *LocationMutation) Type() string { +// Type returns the node type of this mutation (AIP). +func (m *AIPMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). -func (m *LocationMutation) Fields() []string { - fields := make([]string, 0, 7) +func (m *AIPMutation) Fields() []string { + fields := make([]string, 0, 6) if m.name != nil { - fields = append(fields, location.FieldName) - } - if m.description != nil { - fields = append(fields, location.FieldDescription) + fields = append(fields, aip.FieldName) } - if m.source != nil { - fields = append(fields, location.FieldSource) + if m.aip_id != nil { + fields = append(fields, aip.FieldAipID) } - if m.purpose != nil { - fields = append(fields, location.FieldPurpose) + if m.location != nil { + fields = append(fields, aip.FieldLocationID) } - if m.uuid != nil { - fields = append(fields, location.FieldUUID) + if m.status != nil { + fields = append(fields, aip.FieldStatus) } - if m._config != nil { - fields = append(fields, location.FieldConfig) + if m.object_key != nil { + fields = append(fields, aip.FieldObjectKey) } if m.created_at != nil { - fields = append(fields, location.FieldCreatedAt) + fields = append(fields, aip.FieldCreatedAt) } return fields } @@ -519,21 +463,19 @@ func (m *LocationMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. 
-func (m *LocationMutation) Field(name string) (ent.Value, bool) { +func (m *AIPMutation) Field(name string) (ent.Value, bool) { switch name { - case location.FieldName: + case aip.FieldName: return m.Name() - case location.FieldDescription: - return m.Description() - case location.FieldSource: - return m.Source() - case location.FieldPurpose: - return m.Purpose() - case location.FieldUUID: - return m.UUID() - case location.FieldConfig: - return m.Config() - case location.FieldCreatedAt: + case aip.FieldAipID: + return m.AipID() + case aip.FieldLocationID: + return m.LocationID() + case aip.FieldStatus: + return m.Status() + case aip.FieldObjectKey: + return m.ObjectKey() + case aip.FieldCreatedAt: return m.CreatedAt() } return nil, false @@ -542,74 +484,65 @@ func (m *LocationMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. -func (m *LocationMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *AIPMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case location.FieldName: + case aip.FieldName: return m.OldName(ctx) - case location.FieldDescription: - return m.OldDescription(ctx) - case location.FieldSource: - return m.OldSource(ctx) - case location.FieldPurpose: - return m.OldPurpose(ctx) - case location.FieldUUID: - return m.OldUUID(ctx) - case location.FieldConfig: - return m.OldConfig(ctx) - case location.FieldCreatedAt: + case aip.FieldAipID: + return m.OldAipID(ctx) + case aip.FieldLocationID: + return m.OldLocationID(ctx) + case aip.FieldStatus: + return m.OldStatus(ctx) + case aip.FieldObjectKey: + return m.OldObjectKey(ctx) + case aip.FieldCreatedAt: return m.OldCreatedAt(ctx) } - return nil, fmt.Errorf("unknown Location field %s", name) + return nil, fmt.Errorf("unknown AIP field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *LocationMutation) SetField(name string, value ent.Value) error { +func (m *AIPMutation) SetField(name string, value ent.Value) error { switch name { - case location.FieldName: + case aip.FieldName: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetName(v) return nil - case location.FieldDescription: - v, ok := value.(string) + case aip.FieldAipID: + v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetDescription(v) + m.SetAipID(v) return nil - case location.FieldSource: - v, ok := value.(types.LocationSource) + case aip.FieldLocationID: + v, ok := value.(int) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetSource(v) + m.SetLocationID(v) return nil - case location.FieldPurpose: - v, ok := value.(types.LocationPurpose) + case aip.FieldStatus: + v, ok := value.(types.AIPStatus) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetPurpose(v) + m.SetStatus(v) return nil - case location.FieldUUID: + case aip.FieldObjectKey: v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUUID(v) - return nil - case location.FieldConfig: - v, ok := value.(types.LocationConfig) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetConfig(v) + m.SetObjectKey(v) return nil - case location.FieldCreatedAt: + case aip.FieldCreatedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) @@ -617,193 +550,195 @@ func (m *LocationMutation) SetField(name string, value ent.Value) error { m.SetCreatedAt(v) return nil } - return fmt.Errorf("unknown Location field %s", name) + return fmt.Errorf("unknown AIP field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *LocationMutation) AddedFields() []string { - return nil +func (m *AIPMutation) AddedFields() []string { + var fields []string + return fields } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *LocationMutation) AddedField(name string) (ent.Value, bool) { +func (m *AIPMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *LocationMutation) AddField(name string, value ent.Value) error { +func (m *AIPMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown Location numeric field %s", name) + return fmt.Errorf("unknown AIP numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *LocationMutation) ClearedFields() []string { - return nil +func (m *AIPMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(aip.FieldLocationID) { + fields = append(fields, aip.FieldLocationID) + } + return fields } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. 
-func (m *LocationMutation) FieldCleared(name string) bool { +func (m *AIPMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *LocationMutation) ClearField(name string) error { - return fmt.Errorf("unknown Location nullable field %s", name) +func (m *AIPMutation) ClearField(name string) error { + switch name { + case aip.FieldLocationID: + m.ClearLocationID() + return nil + } + return fmt.Errorf("unknown AIP nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *LocationMutation) ResetField(name string) error { +func (m *AIPMutation) ResetField(name string) error { switch name { - case location.FieldName: + case aip.FieldName: m.ResetName() return nil - case location.FieldDescription: - m.ResetDescription() - return nil - case location.FieldSource: - m.ResetSource() + case aip.FieldAipID: + m.ResetAipID() return nil - case location.FieldPurpose: - m.ResetPurpose() + case aip.FieldLocationID: + m.ResetLocationID() return nil - case location.FieldUUID: - m.ResetUUID() + case aip.FieldStatus: + m.ResetStatus() return nil - case location.FieldConfig: - m.ResetConfig() + case aip.FieldObjectKey: + m.ResetObjectKey() return nil - case location.FieldCreatedAt: + case aip.FieldCreatedAt: m.ResetCreatedAt() return nil } - return fmt.Errorf("unknown Location field %s", name) + return fmt.Errorf("unknown AIP field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *LocationMutation) AddedEdges() []string { +func (m *AIPMutation) AddedEdges() []string { edges := make([]string, 0, 1) - if m.packages != nil { - edges = append(edges, location.EdgePackages) + if m.location != nil { + edges = append(edges, aip.EdgeLocation) } return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *LocationMutation) AddedIDs(name string) []ent.Value { +func (m *AIPMutation) AddedIDs(name string) []ent.Value { switch name { - case location.EdgePackages: - ids := make([]ent.Value, 0, len(m.packages)) - for id := range m.packages { - ids = append(ids, id) + case aip.EdgeLocation: + if id := m.location; id != nil { + return []ent.Value{*id} } - return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *LocationMutation) RemovedEdges() []string { +func (m *AIPMutation) RemovedEdges() []string { edges := make([]string, 0, 1) - if m.removedpackages != nil { - edges = append(edges, location.EdgePackages) - } return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *LocationMutation) RemovedIDs(name string) []ent.Value { - switch name { - case location.EdgePackages: - ids := make([]ent.Value, 0, len(m.removedpackages)) - for id := range m.removedpackages { - ids = append(ids, id) - } - return ids - } +func (m *AIPMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
-func (m *LocationMutation) ClearedEdges() []string { +func (m *AIPMutation) ClearedEdges() []string { edges := make([]string, 0, 1) - if m.clearedpackages { - edges = append(edges, location.EdgePackages) + if m.clearedlocation { + edges = append(edges, aip.EdgeLocation) } return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *LocationMutation) EdgeCleared(name string) bool { +func (m *AIPMutation) EdgeCleared(name string) bool { switch name { - case location.EdgePackages: - return m.clearedpackages + case aip.EdgeLocation: + return m.clearedlocation } return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *LocationMutation) ClearEdge(name string) error { +func (m *AIPMutation) ClearEdge(name string) error { switch name { + case aip.EdgeLocation: + m.ClearLocation() + return nil } - return fmt.Errorf("unknown Location unique edge %s", name) + return fmt.Errorf("unknown AIP unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *LocationMutation) ResetEdge(name string) error { +func (m *AIPMutation) ResetEdge(name string) error { switch name { - case location.EdgePackages: - m.ResetPackages() + case aip.EdgeLocation: + m.ResetLocation() return nil } - return fmt.Errorf("unknown Location edge %s", name) + return fmt.Errorf("unknown AIP edge %s", name) } -// PkgMutation represents an operation that mutates the Pkg nodes in the graph. -type PkgMutation struct { +// LocationMutation represents an operation that mutates the Location nodes in the graph. +type LocationMutation struct { config - op Op - typ string - id *int - name *string - aip_id *uuid.UUID - status *types.PackageStatus - object_key *uuid.UUID - created_at *time.Time - clearedFields map[string]struct{} - location *int - clearedlocation bool - done bool - oldValue func(context.Context) (*Pkg, error) - predicates []predicate.Pkg + op Op + typ string + id *int + name *string + description *string + source *types.LocationSource + purpose *types.LocationPurpose + uuid *uuid.UUID + _config *types.LocationConfig + created_at *time.Time + clearedFields map[string]struct{} + aips map[int]struct{} + removedaips map[int]struct{} + clearedaips bool + done bool + oldValue func(context.Context) (*Location, error) + predicates []predicate.Location } -var _ ent.Mutation = (*PkgMutation)(nil) +var _ ent.Mutation = (*LocationMutation)(nil) -// pkgOption allows management of the mutation configuration using functional options. -type pkgOption func(*PkgMutation) +// locationOption allows management of the mutation configuration using functional options. +type locationOption func(*LocationMutation) -// newPkgMutation creates new mutation for the Pkg entity. -func newPkgMutation(c config, op Op, opts ...pkgOption) *PkgMutation { - m := &PkgMutation{ +// newLocationMutation creates new mutation for the Location entity. +func newLocationMutation(c config, op Op, opts ...locationOption) *LocationMutation { + m := &LocationMutation{ config: c, op: op, - typ: TypePkg, + typ: TypeLocation, clearedFields: make(map[string]struct{}), } for _, opt := range opts { @@ -812,20 +747,20 @@ func newPkgMutation(c config, op Op, opts ...pkgOption) *PkgMutation { return m } -// withPkgID sets the ID field of the mutation. 
-func withPkgID(id int) pkgOption { - return func(m *PkgMutation) { +// withLocationID sets the ID field of the mutation. +func withLocationID(id int) locationOption { + return func(m *LocationMutation) { var ( err error once sync.Once - value *Pkg + value *Location ) - m.oldValue = func(ctx context.Context) (*Pkg, error) { + m.oldValue = func(ctx context.Context) (*Location, error) { once.Do(func() { if m.done { err = errors.New("querying old values post mutation is not allowed") } else { - value, err = m.Client().Pkg.Get(ctx, id) + value, err = m.Client().Location.Get(ctx, id) } }) return value, err @@ -834,10 +769,10 @@ func withPkgID(id int) pkgOption { } } -// withPkg sets the old Pkg of the mutation. -func withPkg(node *Pkg) pkgOption { - return func(m *PkgMutation) { - m.oldValue = func(context.Context) (*Pkg, error) { +// withLocation sets the old Location of the mutation. +func withLocation(node *Location) locationOption { + return func(m *LocationMutation) { + m.oldValue = func(context.Context) (*Location, error) { return node, nil } m.id = &node.ID @@ -846,7 +781,7 @@ func withPkg(node *Pkg) pkgOption { // Client returns a new `ent.Client` from the mutation. If the mutation was // executed in a transaction (ent.Tx), a transactional client is returned. -func (m PkgMutation) Client() *Client { +func (m LocationMutation) Client() *Client { client := &Client{config: m.config} client.init() return client @@ -854,7 +789,7 @@ func (m PkgMutation) Client() *Client { // Tx returns an `ent.Tx` for mutations that were executed in transactions; // it returns an error otherwise. -func (m PkgMutation) Tx() (*Tx, error) { +func (m LocationMutation) Tx() (*Tx, error) { if _, ok := m.driver.(*txDriver); !ok { return nil, errors.New("db: mutation is not running in a transaction") } @@ -865,7 +800,7 @@ func (m PkgMutation) Tx() (*Tx, error) { // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. -func (m *PkgMutation) ID() (id int, exists bool) { +func (m *LocationMutation) ID() (id int, exists bool) { if m.id == nil { return } @@ -876,7 +811,7 @@ func (m *PkgMutation) ID() (id int, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *PkgMutation) IDs(ctx context.Context) ([]int, error) { +func (m *LocationMutation) IDs(ctx context.Context) ([]int, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() @@ -885,19 +820,19 @@ func (m *PkgMutation) IDs(ctx context.Context) ([]int, error) { } fallthrough case m.op.Is(OpUpdate | OpDelete): - return m.Client().Pkg.Query().Where(m.predicates...).IDs(ctx) + return m.Client().Location.Query().Where(m.predicates...).IDs(ctx) default: return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) } } // SetName sets the "name" field. -func (m *PkgMutation) SetName(s string) { +func (m *LocationMutation) SetName(s string) { m.name = &s } // Name returns the value of the "name" field in the mutation. -func (m *PkgMutation) Name() (r string, exists bool) { +func (m *LocationMutation) Name() (r string, exists bool) { v := m.name if v == nil { return @@ -905,10 +840,10 @@ func (m *PkgMutation) Name() (r string, exists bool) { return *v, true } -// OldName returns the old "name" field's value of the Pkg entity. 
-// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldName returns the old "name" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldName(ctx context.Context) (v string, err error) { +func (m *LocationMutation) OldName(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldName is only allowed on UpdateOne operations") } @@ -923,174 +858,197 @@ func (m *PkgMutation) OldName(ctx context.Context) (v string, err error) { } // ResetName resets all changes to the "name" field. -func (m *PkgMutation) ResetName() { +func (m *LocationMutation) ResetName() { m.name = nil } -// SetAipID sets the "aip_id" field. -func (m *PkgMutation) SetAipID(u uuid.UUID) { - m.aip_id = &u +// SetDescription sets the "description" field. +func (m *LocationMutation) SetDescription(s string) { + m.description = &s } -// AipID returns the value of the "aip_id" field in the mutation. -func (m *PkgMutation) AipID() (r uuid.UUID, exists bool) { - v := m.aip_id +// Description returns the value of the "description" field in the mutation. +func (m *LocationMutation) Description() (r string, exists bool) { + v := m.description if v == nil { return } return *v, true } -// OldAipID returns the old "aip_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldDescription returns the old "description" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldAipID(ctx context.Context) (v uuid.UUID, err error) { +func (m *LocationMutation) OldDescription(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldAipID is only allowed on UpdateOne operations") + return v, errors.New("OldDescription is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldAipID requires an ID field in the mutation") + return v, errors.New("OldDescription requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldAipID: %w", err) + return v, fmt.Errorf("querying old value for OldDescription: %w", err) } - return oldValue.AipID, nil + return oldValue.Description, nil } -// ResetAipID resets all changes to the "aip_id" field. -func (m *PkgMutation) ResetAipID() { - m.aip_id = nil +// ResetDescription resets all changes to the "description" field. +func (m *LocationMutation) ResetDescription() { + m.description = nil } -// SetLocationID sets the "location_id" field. -func (m *PkgMutation) SetLocationID(i int) { - m.location = &i +// SetSource sets the "source" field. +func (m *LocationMutation) SetSource(ts types.LocationSource) { + m.source = &ts } -// LocationID returns the value of the "location_id" field in the mutation. -func (m *PkgMutation) LocationID() (r int, exists bool) { - v := m.location +// Source returns the value of the "source" field in the mutation. 
+func (m *LocationMutation) Source() (r types.LocationSource, exists bool) { + v := m.source if v == nil { return } return *v, true } -// OldLocationID returns the old "location_id" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldSource returns the old "source" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldLocationID(ctx context.Context) (v int, err error) { +func (m *LocationMutation) OldSource(ctx context.Context) (v types.LocationSource, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLocationID is only allowed on UpdateOne operations") + return v, errors.New("OldSource is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLocationID requires an ID field in the mutation") + return v, errors.New("OldSource requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldLocationID: %w", err) + return v, fmt.Errorf("querying old value for OldSource: %w", err) } - return oldValue.LocationID, nil + return oldValue.Source, nil } -// ClearLocationID clears the value of the "location_id" field. -func (m *PkgMutation) ClearLocationID() { - m.location = nil - m.clearedFields[pkg.FieldLocationID] = struct{}{} +// ResetSource resets all changes to the "source" field. +func (m *LocationMutation) ResetSource() { + m.source = nil } -// LocationIDCleared returns if the "location_id" field was cleared in this mutation. -func (m *PkgMutation) LocationIDCleared() bool { - _, ok := m.clearedFields[pkg.FieldLocationID] - return ok +// SetPurpose sets the "purpose" field. +func (m *LocationMutation) SetPurpose(tp types.LocationPurpose) { + m.purpose = &tp } -// ResetLocationID resets all changes to the "location_id" field. -func (m *PkgMutation) ResetLocationID() { - m.location = nil - delete(m.clearedFields, pkg.FieldLocationID) +// Purpose returns the value of the "purpose" field in the mutation. +func (m *LocationMutation) Purpose() (r types.LocationPurpose, exists bool) { + v := m.purpose + if v == nil { + return + } + return *v, true } -// SetStatus sets the "status" field. -func (m *PkgMutation) SetStatus(ts types.PackageStatus) { - m.status = &ts +// OldPurpose returns the old "purpose" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *LocationMutation) OldPurpose(ctx context.Context) (v types.LocationPurpose, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPurpose is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPurpose requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPurpose: %w", err) + } + return oldValue.Purpose, nil } -// Status returns the value of the "status" field in the mutation. -func (m *PkgMutation) Status() (r types.PackageStatus, exists bool) { - v := m.status +// ResetPurpose resets all changes to the "purpose" field. 
+func (m *LocationMutation) ResetPurpose() { + m.purpose = nil +} + +// SetUUID sets the "uuid" field. +func (m *LocationMutation) SetUUID(u uuid.UUID) { + m.uuid = &u +} + +// UUID returns the value of the "uuid" field in the mutation. +func (m *LocationMutation) UUID() (r uuid.UUID, exists bool) { + v := m.uuid if v == nil { return } return *v, true } -// OldStatus returns the old "status" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldUUID returns the old "uuid" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldStatus(ctx context.Context) (v types.PackageStatus, err error) { +func (m *LocationMutation) OldUUID(ctx context.Context) (v uuid.UUID, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") + return v, errors.New("OldUUID is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") + return v, errors.New("OldUUID requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) + return v, fmt.Errorf("querying old value for OldUUID: %w", err) } - return oldValue.Status, nil + return oldValue.UUID, nil } -// ResetStatus resets all changes to the "status" field. -func (m *PkgMutation) ResetStatus() { - m.status = nil +// ResetUUID resets all changes to the "uuid" field. +func (m *LocationMutation) ResetUUID() { + m.uuid = nil } -// SetObjectKey sets the "object_key" field. -func (m *PkgMutation) SetObjectKey(u uuid.UUID) { - m.object_key = &u +// SetConfig sets the "config" field. +func (m *LocationMutation) SetConfig(tc types.LocationConfig) { + m._config = &tc } -// ObjectKey returns the value of the "object_key" field in the mutation. -func (m *PkgMutation) ObjectKey() (r uuid.UUID, exists bool) { - v := m.object_key +// Config returns the value of the "config" field in the mutation. +func (m *LocationMutation) Config() (r types.LocationConfig, exists bool) { + v := m._config if v == nil { return } return *v, true } -// OldObjectKey returns the old "object_key" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldConfig returns the old "config" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *PkgMutation) OldObjectKey(ctx context.Context) (v uuid.UUID, err error) { +func (m *LocationMutation) OldConfig(ctx context.Context) (v types.LocationConfig, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldObjectKey is only allowed on UpdateOne operations") + return v, errors.New("OldConfig is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldObjectKey requires an ID field in the mutation") + return v, errors.New("OldConfig requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldObjectKey: %w", err) + return v, fmt.Errorf("querying old value for OldConfig: %w", err) } - return oldValue.ObjectKey, nil + return oldValue.Config, nil } -// ResetObjectKey resets all changes to the "object_key" field. -func (m *PkgMutation) ResetObjectKey() { - m.object_key = nil +// ResetConfig resets all changes to the "config" field. +func (m *LocationMutation) ResetConfig() { + m._config = nil } // SetCreatedAt sets the "created_at" field. -func (m *PkgMutation) SetCreatedAt(t time.Time) { +func (m *LocationMutation) SetCreatedAt(t time.Time) { m.created_at = &t } // CreatedAt returns the value of the "created_at" field in the mutation. -func (m *PkgMutation) CreatedAt() (r time.Time, exists bool) { +func (m *LocationMutation) CreatedAt() (r time.Time, exists bool) { v := m.created_at if v == nil { return @@ -1098,10 +1056,10 @@ func (m *PkgMutation) CreatedAt() (r time.Time, exists bool) { return *v, true } -// OldCreatedAt returns the old "created_at" field's value of the Pkg entity. -// If the Pkg object wasn't provided to the builder, the object is fetched from the database. +// OldCreatedAt returns the old "created_at" field's value of the Location entity. +// If the Location object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *PkgMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { +func (m *LocationMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -1116,46 +1074,73 @@ func (m *PkgMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) } // ResetCreatedAt resets all changes to the "created_at" field. -func (m *PkgMutation) ResetCreatedAt() { +func (m *LocationMutation) ResetCreatedAt() { m.created_at = nil } -// ClearLocation clears the "location" edge to the Location entity. -func (m *PkgMutation) ClearLocation() { - m.clearedlocation = true - m.clearedFields[pkg.FieldLocationID] = struct{}{} +// AddAipIDs adds the "aips" edge to the AIP entity by ids. +func (m *LocationMutation) AddAipIDs(ids ...int) { + if m.aips == nil { + m.aips = make(map[int]struct{}) + } + for i := range ids { + m.aips[ids[i]] = struct{}{} + } } -// LocationCleared reports if the "location" edge to the Location entity was cleared. -func (m *PkgMutation) LocationCleared() bool { - return m.LocationIDCleared() || m.clearedlocation +// ClearAips clears the "aips" edge to the AIP entity. +func (m *LocationMutation) ClearAips() { + m.clearedaips = true } -// LocationIDs returns the "location" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// LocationID instead. It exists only for internal usage by the builders. 
-func (m *PkgMutation) LocationIDs() (ids []int) { - if id := m.location; id != nil { - ids = append(ids, *id) +// AipsCleared reports if the "aips" edge to the AIP entity was cleared. +func (m *LocationMutation) AipsCleared() bool { + return m.clearedaips +} + +// RemoveAipIDs removes the "aips" edge to the AIP entity by IDs. +func (m *LocationMutation) RemoveAipIDs(ids ...int) { + if m.removedaips == nil { + m.removedaips = make(map[int]struct{}) + } + for i := range ids { + delete(m.aips, ids[i]) + m.removedaips[ids[i]] = struct{}{} + } +} + +// RemovedAips returns the removed IDs of the "aips" edge to the AIP entity. +func (m *LocationMutation) RemovedAipsIDs() (ids []int) { + for id := range m.removedaips { + ids = append(ids, id) } return } -// ResetLocation resets all changes to the "location" edge. -func (m *PkgMutation) ResetLocation() { - m.location = nil - m.clearedlocation = false +// AipsIDs returns the "aips" edge IDs in the mutation. +func (m *LocationMutation) AipsIDs() (ids []int) { + for id := range m.aips { + ids = append(ids, id) + } + return +} + +// ResetAips resets all changes to the "aips" edge. +func (m *LocationMutation) ResetAips() { + m.aips = nil + m.clearedaips = false + m.removedaips = nil } -// Where appends a list predicates to the PkgMutation builder. -func (m *PkgMutation) Where(ps ...predicate.Pkg) { +// Where appends a list predicates to the LocationMutation builder. +func (m *LocationMutation) Where(ps ...predicate.Location) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the PkgMutation builder. Using this method, +// WhereP appends storage-level predicates to the LocationMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *PkgMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.Pkg, len(ps)) +func (m *LocationMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Location, len(ps)) for i := range ps { p[i] = ps[i] } @@ -1163,42 +1148,45 @@ func (m *PkgMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *PkgMutation) Op() Op { +func (m *LocationMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *PkgMutation) SetOp(op Op) { +func (m *LocationMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (Pkg). -func (m *PkgMutation) Type() string { +// Type returns the node type of this mutation (Location). +func (m *LocationMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *PkgMutation) Fields() []string { - fields := make([]string, 0, 6) +func (m *LocationMutation) Fields() []string { + fields := make([]string, 0, 7) if m.name != nil { - fields = append(fields, pkg.FieldName) + fields = append(fields, location.FieldName) } - if m.aip_id != nil { - fields = append(fields, pkg.FieldAipID) + if m.description != nil { + fields = append(fields, location.FieldDescription) } - if m.location != nil { - fields = append(fields, pkg.FieldLocationID) + if m.source != nil { + fields = append(fields, location.FieldSource) } - if m.status != nil { - fields = append(fields, pkg.FieldStatus) + if m.purpose != nil { + fields = append(fields, location.FieldPurpose) } - if m.object_key != nil { - fields = append(fields, pkg.FieldObjectKey) + if m.uuid != nil { + fields = append(fields, location.FieldUUID) + } + if m._config != nil { + fields = append(fields, location.FieldConfig) } if m.created_at != nil { - fields = append(fields, pkg.FieldCreatedAt) + fields = append(fields, location.FieldCreatedAt) } return fields } @@ -1206,19 +1194,21 @@ func (m *PkgMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *PkgMutation) Field(name string) (ent.Value, bool) { +func (m *LocationMutation) Field(name string) (ent.Value, bool) { switch name { - case pkg.FieldName: + case location.FieldName: return m.Name() - case pkg.FieldAipID: - return m.AipID() - case pkg.FieldLocationID: - return m.LocationID() - case pkg.FieldStatus: - return m.Status() - case pkg.FieldObjectKey: - return m.ObjectKey() - case pkg.FieldCreatedAt: + case location.FieldDescription: + return m.Description() + case location.FieldSource: + return m.Source() + case location.FieldPurpose: + return m.Purpose() + case location.FieldUUID: + return m.UUID() + case location.FieldConfig: + return m.Config() + case location.FieldCreatedAt: return m.CreatedAt() } return nil, false @@ -1227,65 +1217,74 @@ func (m *PkgMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. -func (m *PkgMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *LocationMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case pkg.FieldName: + case location.FieldName: return m.OldName(ctx) - case pkg.FieldAipID: - return m.OldAipID(ctx) - case pkg.FieldLocationID: - return m.OldLocationID(ctx) - case pkg.FieldStatus: - return m.OldStatus(ctx) - case pkg.FieldObjectKey: - return m.OldObjectKey(ctx) - case pkg.FieldCreatedAt: + case location.FieldDescription: + return m.OldDescription(ctx) + case location.FieldSource: + return m.OldSource(ctx) + case location.FieldPurpose: + return m.OldPurpose(ctx) + case location.FieldUUID: + return m.OldUUID(ctx) + case location.FieldConfig: + return m.OldConfig(ctx) + case location.FieldCreatedAt: return m.OldCreatedAt(ctx) } - return nil, fmt.Errorf("unknown Pkg field %s", name) + return nil, fmt.Errorf("unknown Location field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. 
-func (m *PkgMutation) SetField(name string, value ent.Value) error { +func (m *LocationMutation) SetField(name string, value ent.Value) error { switch name { - case pkg.FieldName: + case location.FieldName: v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } m.SetName(v) return nil - case pkg.FieldAipID: - v, ok := value.(uuid.UUID) + case location.FieldDescription: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetAipID(v) + m.SetDescription(v) return nil - case pkg.FieldLocationID: - v, ok := value.(int) + case location.FieldSource: + v, ok := value.(types.LocationSource) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetLocationID(v) + m.SetSource(v) return nil - case pkg.FieldStatus: - v, ok := value.(types.PackageStatus) + case location.FieldPurpose: + v, ok := value.(types.LocationPurpose) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetStatus(v) + m.SetPurpose(v) return nil - case pkg.FieldObjectKey: + case location.FieldUUID: v, ok := value.(uuid.UUID) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetObjectKey(v) + m.SetUUID(v) return nil - case pkg.FieldCreatedAt: + case location.FieldConfig: + v, ok := value.(types.LocationConfig) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConfig(v) + return nil + case location.FieldCreatedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) @@ -1293,158 +1292,159 @@ func (m *PkgMutation) SetField(name string, value ent.Value) error { m.SetCreatedAt(v) return nil } - return fmt.Errorf("unknown Pkg field %s", name) + return fmt.Errorf("unknown Location field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. -func (m *PkgMutation) AddedFields() []string { - var fields []string - return fields +func (m *LocationMutation) AddedFields() []string { + return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *PkgMutation) AddedField(name string) (ent.Value, bool) { - switch name { - } +func (m *LocationMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *PkgMutation) AddField(name string, value ent.Value) error { +func (m *LocationMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown Pkg numeric field %s", name) + return fmt.Errorf("unknown Location numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *PkgMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(pkg.FieldLocationID) { - fields = append(fields, pkg.FieldLocationID) - } - return fields +func (m *LocationMutation) ClearedFields() []string { + return nil } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. 
-func (m *PkgMutation) FieldCleared(name string) bool { +func (m *LocationMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. -func (m *PkgMutation) ClearField(name string) error { - switch name { - case pkg.FieldLocationID: - m.ClearLocationID() - return nil - } - return fmt.Errorf("unknown Pkg nullable field %s", name) +func (m *LocationMutation) ClearField(name string) error { + return fmt.Errorf("unknown Location nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *PkgMutation) ResetField(name string) error { +func (m *LocationMutation) ResetField(name string) error { switch name { - case pkg.FieldName: + case location.FieldName: m.ResetName() return nil - case pkg.FieldAipID: - m.ResetAipID() + case location.FieldDescription: + m.ResetDescription() return nil - case pkg.FieldLocationID: - m.ResetLocationID() + case location.FieldSource: + m.ResetSource() return nil - case pkg.FieldStatus: - m.ResetStatus() + case location.FieldPurpose: + m.ResetPurpose() return nil - case pkg.FieldObjectKey: - m.ResetObjectKey() + case location.FieldUUID: + m.ResetUUID() + return nil + case location.FieldConfig: + m.ResetConfig() return nil - case pkg.FieldCreatedAt: + case location.FieldCreatedAt: m.ResetCreatedAt() return nil } - return fmt.Errorf("unknown Pkg field %s", name) + return fmt.Errorf("unknown Location field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *PkgMutation) AddedEdges() []string { +func (m *LocationMutation) AddedEdges() []string { edges := make([]string, 0, 1) - if m.location != nil { - edges = append(edges, pkg.EdgeLocation) + if m.aips != nil { + edges = append(edges, location.EdgeAips) } return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *PkgMutation) AddedIDs(name string) []ent.Value { +func (m *LocationMutation) AddedIDs(name string) []ent.Value { switch name { - case pkg.EdgeLocation: - if id := m.location; id != nil { - return []ent.Value{*id} + case location.EdgeAips: + ids := make([]ent.Value, 0, len(m.aips)) + for id := range m.aips { + ids = append(ids, id) } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. -func (m *PkgMutation) RemovedEdges() []string { +func (m *LocationMutation) RemovedEdges() []string { edges := make([]string, 0, 1) + if m.removedaips != nil { + edges = append(edges, location.EdgeAips) + } return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *PkgMutation) RemovedIDs(name string) []ent.Value { +func (m *LocationMutation) RemovedIDs(name string) []ent.Value { + switch name { + case location.EdgeAips: + ids := make([]ent.Value, 0, len(m.removedaips)) + for id := range m.removedaips { + ids = append(ids, id) + } + return ids + } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
-func (m *PkgMutation) ClearedEdges() []string { +func (m *LocationMutation) ClearedEdges() []string { edges := make([]string, 0, 1) - if m.clearedlocation { - edges = append(edges, pkg.EdgeLocation) + if m.clearedaips { + edges = append(edges, location.EdgeAips) } return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *PkgMutation) EdgeCleared(name string) bool { +func (m *LocationMutation) EdgeCleared(name string) bool { switch name { - case pkg.EdgeLocation: - return m.clearedlocation + case location.EdgeAips: + return m.clearedaips } return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *PkgMutation) ClearEdge(name string) error { +func (m *LocationMutation) ClearEdge(name string) error { switch name { - case pkg.EdgeLocation: - m.ClearLocation() - return nil } - return fmt.Errorf("unknown Pkg unique edge %s", name) + return fmt.Errorf("unknown Location unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. -func (m *PkgMutation) ResetEdge(name string) error { +func (m *LocationMutation) ResetEdge(name string) error { switch name { - case pkg.EdgeLocation: - m.ResetLocation() + case location.EdgeAips: + m.ResetAips() return nil } - return fmt.Errorf("unknown Pkg edge %s", name) + return fmt.Errorf("unknown Location edge %s", name) } diff --git a/internal/storage/persistence/ent/db/pkg/where.go b/internal/storage/persistence/ent/db/pkg/where.go deleted file mode 100644 index 5899059f6..000000000 --- a/internal/storage/persistence/ent/db/pkg/where.go +++ /dev/null @@ -1,356 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package pkg - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" - "github.com/artefactual-sdps/enduro/internal/storage/types" - "github.com/google/uuid" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. -func IDIn(ids ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldID, id)) -} - -// Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
-func Name(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldName, v)) -} - -// AipID applies equality check predicate on the "aip_id" field. It's identical to AipIDEQ. -func AipID(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldAipID, v)) -} - -// LocationID applies equality check predicate on the "location_id" field. It's identical to LocationIDEQ. -func LocationID(v int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldLocationID, v)) -} - -// ObjectKey applies equality check predicate on the "object_key" field. It's identical to ObjectKeyEQ. -func ObjectKey(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldObjectKey, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCreatedAt, v)) -} - -// NameEQ applies the EQ predicate on the "name" field. -func NameEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldName, v)) -} - -// NameNEQ applies the NEQ predicate on the "name" field. -func NameNEQ(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldName, v)) -} - -// NameIn applies the In predicate on the "name" field. -func NameIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldName, vs...)) -} - -// NameNotIn applies the NotIn predicate on the "name" field. -func NameNotIn(vs ...string) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldName, vs...)) -} - -// NameGT applies the GT predicate on the "name" field. -func NameGT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldName, v)) -} - -// NameGTE applies the GTE predicate on the "name" field. -func NameGTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldName, v)) -} - -// NameLT applies the LT predicate on the "name" field. -func NameLT(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldName, v)) -} - -// NameLTE applies the LTE predicate on the "name" field. -func NameLTE(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldName, v)) -} - -// NameContains applies the Contains predicate on the "name" field. -func NameContains(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContains(FieldName, v)) -} - -// NameHasPrefix applies the HasPrefix predicate on the "name" field. -func NameHasPrefix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasPrefix(FieldName, v)) -} - -// NameHasSuffix applies the HasSuffix predicate on the "name" field. -func NameHasSuffix(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldHasSuffix(FieldName, v)) -} - -// NameEqualFold applies the EqualFold predicate on the "name" field. -func NameEqualFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldEqualFold(FieldName, v)) -} - -// NameContainsFold applies the ContainsFold predicate on the "name" field. -func NameContainsFold(v string) predicate.Pkg { - return predicate.Pkg(sql.FieldContainsFold(FieldName, v)) -} - -// AipIDEQ applies the EQ predicate on the "aip_id" field. -func AipIDEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldAipID, v)) -} - -// AipIDNEQ applies the NEQ predicate on the "aip_id" field. -func AipIDNEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldAipID, v)) -} - -// AipIDIn applies the In predicate on the "aip_id" field. 
-func AipIDIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldAipID, vs...)) -} - -// AipIDNotIn applies the NotIn predicate on the "aip_id" field. -func AipIDNotIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldAipID, vs...)) -} - -// AipIDGT applies the GT predicate on the "aip_id" field. -func AipIDGT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldAipID, v)) -} - -// AipIDGTE applies the GTE predicate on the "aip_id" field. -func AipIDGTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldAipID, v)) -} - -// AipIDLT applies the LT predicate on the "aip_id" field. -func AipIDLT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldAipID, v)) -} - -// AipIDLTE applies the LTE predicate on the "aip_id" field. -func AipIDLTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldAipID, v)) -} - -// LocationIDEQ applies the EQ predicate on the "location_id" field. -func LocationIDEQ(v int) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldLocationID, v)) -} - -// LocationIDNEQ applies the NEQ predicate on the "location_id" field. -func LocationIDNEQ(v int) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldLocationID, v)) -} - -// LocationIDIn applies the In predicate on the "location_id" field. -func LocationIDIn(vs ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldLocationID, vs...)) -} - -// LocationIDNotIn applies the NotIn predicate on the "location_id" field. -func LocationIDNotIn(vs ...int) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldLocationID, vs...)) -} - -// LocationIDIsNil applies the IsNil predicate on the "location_id" field. -func LocationIDIsNil() predicate.Pkg { - return predicate.Pkg(sql.FieldIsNull(FieldLocationID)) -} - -// LocationIDNotNil applies the NotNil predicate on the "location_id" field. -func LocationIDNotNil() predicate.Pkg { - return predicate.Pkg(sql.FieldNotNull(FieldLocationID)) -} - -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v types.PackageStatus) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v types.PackageStatus) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...types.PackageStatus) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...types.PackageStatus) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldStatus, vs...)) -} - -// ObjectKeyEQ applies the EQ predicate on the "object_key" field. -func ObjectKeyEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldObjectKey, v)) -} - -// ObjectKeyNEQ applies the NEQ predicate on the "object_key" field. -func ObjectKeyNEQ(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldObjectKey, v)) -} - -// ObjectKeyIn applies the In predicate on the "object_key" field. -func ObjectKeyIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldObjectKey, vs...)) -} - -// ObjectKeyNotIn applies the NotIn predicate on the "object_key" field. -func ObjectKeyNotIn(vs ...uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldObjectKey, vs...)) -} - -// ObjectKeyGT applies the GT predicate on the "object_key" field. 
-func ObjectKeyGT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldObjectKey, v)) -} - -// ObjectKeyGTE applies the GTE predicate on the "object_key" field. -func ObjectKeyGTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldObjectKey, v)) -} - -// ObjectKeyLT applies the LT predicate on the "object_key" field. -func ObjectKeyLT(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldObjectKey, v)) -} - -// ObjectKeyLTE applies the LTE predicate on the "object_key" field. -func ObjectKeyLTE(v uuid.UUID) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldObjectKey, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. -func CreatedAtNotIn(vs ...time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. -func CreatedAtGTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.Pkg { - return predicate.Pkg(sql.FieldLTE(FieldCreatedAt, v)) -} - -// HasLocation applies the HasEdge predicate on the "location" edge. -func HasLocation() predicate.Pkg { - return predicate.Pkg(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, LocationTable, LocationColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasLocationWith applies the HasEdge predicate on the "location" edge with a given conditions (other predicates). -func HasLocationWith(preds ...predicate.Location) predicate.Pkg { - return predicate.Pkg(func(s *sql.Selector) { - step := newLocationStep() - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. 
-func Not(p predicate.Pkg) predicate.Pkg { - return predicate.Pkg(sql.NotPredicates(p)) -} diff --git a/internal/storage/persistence/ent/db/pkg_create.go b/internal/storage/persistence/ent/db/pkg_create.go deleted file mode 100644 index f80a2e7b7..000000000 --- a/internal/storage/persistence/ent/db/pkg_create.go +++ /dev/null @@ -1,298 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/storage/types" - "github.com/google/uuid" -) - -// PkgCreate is the builder for creating a Pkg entity. -type PkgCreate struct { - config - mutation *PkgMutation - hooks []Hook -} - -// SetName sets the "name" field. -func (pc *PkgCreate) SetName(s string) *PkgCreate { - pc.mutation.SetName(s) - return pc -} - -// SetAipID sets the "aip_id" field. -func (pc *PkgCreate) SetAipID(u uuid.UUID) *PkgCreate { - pc.mutation.SetAipID(u) - return pc -} - -// SetLocationID sets the "location_id" field. -func (pc *PkgCreate) SetLocationID(i int) *PkgCreate { - pc.mutation.SetLocationID(i) - return pc -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. -func (pc *PkgCreate) SetNillableLocationID(i *int) *PkgCreate { - if i != nil { - pc.SetLocationID(*i) - } - return pc -} - -// SetStatus sets the "status" field. -func (pc *PkgCreate) SetStatus(ts types.PackageStatus) *PkgCreate { - pc.mutation.SetStatus(ts) - return pc -} - -// SetObjectKey sets the "object_key" field. -func (pc *PkgCreate) SetObjectKey(u uuid.UUID) *PkgCreate { - pc.mutation.SetObjectKey(u) - return pc -} - -// SetCreatedAt sets the "created_at" field. -func (pc *PkgCreate) SetCreatedAt(t time.Time) *PkgCreate { - pc.mutation.SetCreatedAt(t) - return pc -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (pc *PkgCreate) SetNillableCreatedAt(t *time.Time) *PkgCreate { - if t != nil { - pc.SetCreatedAt(*t) - } - return pc -} - -// SetLocation sets the "location" edge to the Location entity. -func (pc *PkgCreate) SetLocation(l *Location) *PkgCreate { - return pc.SetLocationID(l.ID) -} - -// Mutation returns the PkgMutation object of the builder. -func (pc *PkgCreate) Mutation() *PkgMutation { - return pc.mutation -} - -// Save creates the Pkg in the database. -func (pc *PkgCreate) Save(ctx context.Context) (*Pkg, error) { - pc.defaults() - return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (pc *PkgCreate) SaveX(ctx context.Context) *Pkg { - v, err := pc.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (pc *PkgCreate) Exec(ctx context.Context) error { - _, err := pc.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pc *PkgCreate) ExecX(ctx context.Context) { - if err := pc.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (pc *PkgCreate) defaults() { - if _, ok := pc.mutation.CreatedAt(); !ok { - v := pkg.DefaultCreatedAt() - pc.mutation.SetCreatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (pc *PkgCreate) check() error { - if _, ok := pc.mutation.Name(); !ok { - return &ValidationError{Name: "name", err: errors.New(`db: missing required field "Pkg.name"`)} - } - if _, ok := pc.mutation.AipID(); !ok { - return &ValidationError{Name: "aip_id", err: errors.New(`db: missing required field "Pkg.aip_id"`)} - } - if _, ok := pc.mutation.Status(); !ok { - return &ValidationError{Name: "status", err: errors.New(`db: missing required field "Pkg.status"`)} - } - if v, ok := pc.mutation.Status(); ok { - if err := pkg.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "Pkg.status": %w`, err)} - } - } - if _, ok := pc.mutation.ObjectKey(); !ok { - return &ValidationError{Name: "object_key", err: errors.New(`db: missing required field "Pkg.object_key"`)} - } - if _, ok := pc.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "Pkg.created_at"`)} - } - return nil -} - -func (pc *PkgCreate) sqlSave(ctx context.Context) (*Pkg, error) { - if err := pc.check(); err != nil { - return nil, err - } - _node, _spec := pc.createSpec() - if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - pc.mutation.id = &_node.ID - pc.mutation.done = true - return _node, nil -} - -func (pc *PkgCreate) createSpec() (*Pkg, *sqlgraph.CreateSpec) { - var ( - _node = &Pkg{config: pc.config} - _spec = sqlgraph.NewCreateSpec(pkg.Table, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - ) - if value, ok := pc.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - _node.Name = value - } - if value, ok := pc.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - _node.AipID = value - } - if value, ok := pc.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeEnum, value) - _node.Status = value - } - if value, ok := pc.mutation.ObjectKey(); ok { - _spec.SetField(pkg.FieldObjectKey, field.TypeUUID, value) - _node.ObjectKey = value - } - if value, ok := pc.mutation.CreatedAt(); ok { - _spec.SetField(pkg.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if nodes := pc.mutation.LocationIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: pkg.LocationTable, - Columns: []string{pkg.LocationColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _node.LocationID = nodes[0] - _spec.Edges = append(_spec.Edges, edge) - } - return _node, _spec -} - -// PkgCreateBulk is the builder for creating many Pkg entities in bulk. -type PkgCreateBulk struct { - config - err error - builders []*PkgCreate -} - -// Save creates the Pkg entities in the database. 
-func (pcb *PkgCreateBulk) Save(ctx context.Context) ([]*Pkg, error) { - if pcb.err != nil { - return nil, pcb.err - } - specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) - nodes := make([]*Pkg, len(pcb.builders)) - mutators := make([]Mutator, len(pcb.builders)) - for i := range pcb.builders { - func(i int, root context.Context) { - builder := pcb.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*PkgMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (pcb *PkgCreateBulk) SaveX(ctx context.Context) []*Pkg { - v, err := pcb.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (pcb *PkgCreateBulk) Exec(ctx context.Context) error { - _, err := pcb.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pcb *PkgCreateBulk) ExecX(ctx context.Context) { - if err := pcb.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/internal/storage/persistence/ent/db/pkg_delete.go b/internal/storage/persistence/ent/db/pkg_delete.go deleted file mode 100644 index a347f7256..000000000 --- a/internal/storage/persistence/ent/db/pkg_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" -) - -// PkgDelete is the builder for deleting a Pkg entity. -type PkgDelete struct { - config - hooks []Hook - mutation *PkgMutation -} - -// Where appends a list predicates to the PkgDelete builder. -func (pd *PkgDelete) Where(ps ...predicate.Pkg) *PkgDelete { - pd.mutation.Where(ps...) - return pd -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (pd *PkgDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (pd *PkgDelete) ExecX(ctx context.Context) int { - n, err := pd.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (pd *PkgDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(pkg.Table, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - if ps := pd.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - pd.mutation.done = true - return affected, err -} - -// PkgDeleteOne is the builder for deleting a single Pkg entity. -type PkgDeleteOne struct { - pd *PkgDelete -} - -// Where appends a list predicates to the PkgDelete builder. -func (pdo *PkgDeleteOne) Where(ps ...predicate.Pkg) *PkgDeleteOne { - pdo.pd.mutation.Where(ps...) - return pdo -} - -// Exec executes the deletion query. -func (pdo *PkgDeleteOne) Exec(ctx context.Context) error { - n, err := pdo.pd.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{pkg.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (pdo *PkgDeleteOne) ExecX(ctx context.Context) { - if err := pdo.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/internal/storage/persistence/ent/db/pkg_query.go b/internal/storage/persistence/ent/db/pkg_query.go deleted file mode 100644 index 1542296e4..000000000 --- a/internal/storage/persistence/ent/db/pkg_query.go +++ /dev/null @@ -1,606 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" -) - -// PkgQuery is the builder for querying Pkg entities. -type PkgQuery struct { - config - ctx *QueryContext - order []pkg.OrderOption - inters []Interceptor - predicates []predicate.Pkg - withLocation *LocationQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the PkgQuery builder. -func (pq *PkgQuery) Where(ps ...predicate.Pkg) *PkgQuery { - pq.predicates = append(pq.predicates, ps...) - return pq -} - -// Limit the number of records to be returned by this query. -func (pq *PkgQuery) Limit(limit int) *PkgQuery { - pq.ctx.Limit = &limit - return pq -} - -// Offset to start from. -func (pq *PkgQuery) Offset(offset int) *PkgQuery { - pq.ctx.Offset = &offset - return pq -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (pq *PkgQuery) Unique(unique bool) *PkgQuery { - pq.ctx.Unique = &unique - return pq -} - -// Order specifies how the records should be ordered. -func (pq *PkgQuery) Order(o ...pkg.OrderOption) *PkgQuery { - pq.order = append(pq.order, o...) - return pq -} - -// QueryLocation chains the current query on the "location" edge. 
-func (pq *PkgQuery) QueryLocation() *LocationQuery { - query := (&LocationClient{config: pq.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := pq.prepareQuery(ctx); err != nil { - return nil, err - } - selector := pq.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(pkg.Table, pkg.FieldID, selector), - sqlgraph.To(location.Table, location.FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, pkg.LocationTable, pkg.LocationColumn), - ) - fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) - return fromU, nil - } - return query -} - -// First returns the first Pkg entity from the query. -// Returns a *NotFoundError when no Pkg was found. -func (pq *PkgQuery) First(ctx context.Context) (*Pkg, error) { - nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{pkg.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (pq *PkgQuery) FirstX(ctx context.Context) *Pkg { - node, err := pq.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first Pkg ID from the query. -// Returns a *NotFoundError when no Pkg ID was found. -func (pq *PkgQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{pkg.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. -func (pq *PkgQuery) FirstIDX(ctx context.Context) int { - id, err := pq.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single Pkg entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one Pkg entity is found. -// Returns a *NotFoundError when no Pkg entities are found. -func (pq *PkgQuery) Only(ctx context.Context) (*Pkg, error) { - nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{pkg.Label} - default: - return nil, &NotSingularError{pkg.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. -func (pq *PkgQuery) OnlyX(ctx context.Context) *Pkg { - node, err := pq.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only Pkg ID in the query. -// Returns a *NotSingularError when more than one Pkg ID is found. -// Returns a *NotFoundError when no entities are found. -func (pq *PkgQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{pkg.Label} - default: - err = &NotSingularError{pkg.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (pq *PkgQuery) OnlyIDX(ctx context.Context) int { - id, err := pq.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of Pkgs. 
-func (pq *PkgQuery) All(ctx context.Context) ([]*Pkg, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryAll) - if err := pq.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*Pkg, *PkgQuery]() - return withInterceptors[[]*Pkg](ctx, pq, qr, pq.inters) -} - -// AllX is like All, but panics if an error occurs. -func (pq *PkgQuery) AllX(ctx context.Context) []*Pkg { - nodes, err := pq.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of Pkg IDs. -func (pq *PkgQuery) IDs(ctx context.Context) (ids []int, err error) { - if pq.ctx.Unique == nil && pq.path != nil { - pq.Unique(true) - } - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryIDs) - if err = pq.Select(pkg.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (pq *PkgQuery) IDsX(ctx context.Context) []int { - ids, err := pq.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (pq *PkgQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryCount) - if err := pq.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, pq, querierCount[*PkgQuery](), pq.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (pq *PkgQuery) CountX(ctx context.Context) int { - count, err := pq.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. -func (pq *PkgQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, pq.ctx, ent.OpQueryExist) - switch _, err := pq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("db: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (pq *PkgQuery) ExistX(ctx context.Context) bool { - exist, err := pq.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the PkgQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. -func (pq *PkgQuery) Clone() *PkgQuery { - if pq == nil { - return nil - } - return &PkgQuery{ - config: pq.config, - ctx: pq.ctx.Clone(), - order: append([]pkg.OrderOption{}, pq.order...), - inters: append([]Interceptor{}, pq.inters...), - predicates: append([]predicate.Pkg{}, pq.predicates...), - withLocation: pq.withLocation.Clone(), - // clone intermediate query. - sql: pq.sql.Clone(), - path: pq.path, - } -} - -// WithLocation tells the query-builder to eager-load the nodes that are connected to -// the "location" edge. The optional arguments are used to configure the query builder of the edge. -func (pq *PkgQuery) WithLocation(opts ...func(*LocationQuery)) *PkgQuery { - query := (&LocationClient{config: pq.config}).Query() - for _, opt := range opts { - opt(query) - } - pq.withLocation = query - return pq -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// Name string `json:"name,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.Pkg.Query(). -// GroupBy(pkg.FieldName). -// Aggregate(db.Count()). 
-// Scan(ctx, &v) -func (pq *PkgQuery) GroupBy(field string, fields ...string) *PkgGroupBy { - pq.ctx.Fields = append([]string{field}, fields...) - grbuild := &PkgGroupBy{build: pq} - grbuild.flds = &pq.ctx.Fields - grbuild.label = pkg.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// Name string `json:"name,omitempty"` -// } -// -// client.Pkg.Query(). -// Select(pkg.FieldName). -// Scan(ctx, &v) -func (pq *PkgQuery) Select(fields ...string) *PkgSelect { - pq.ctx.Fields = append(pq.ctx.Fields, fields...) - sbuild := &PkgSelect{PkgQuery: pq} - sbuild.label = pkg.Label - sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a PkgSelect configured with the given aggregations. -func (pq *PkgQuery) Aggregate(fns ...AggregateFunc) *PkgSelect { - return pq.Select().Aggregate(fns...) -} - -func (pq *PkgQuery) prepareQuery(ctx context.Context) error { - for _, inter := range pq.inters { - if inter == nil { - return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, pq); err != nil { - return err - } - } - } - for _, f := range pq.ctx.Fields { - if !pkg.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} - } - } - if pq.path != nil { - prev, err := pq.path(ctx) - if err != nil { - return err - } - pq.sql = prev - } - return nil -} - -func (pq *PkgQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Pkg, error) { - var ( - nodes = []*Pkg{} - _spec = pq.querySpec() - loadedTypes = [1]bool{ - pq.withLocation != nil, - } - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*Pkg).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &Pkg{config: pq.config} - nodes = append(nodes, node) - node.Edges.loadedTypes = loadedTypes - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - if query := pq.withLocation; query != nil { - if err := pq.loadLocation(ctx, query, nodes, nil, - func(n *Pkg, e *Location) { n.Edges.Location = e }); err != nil { - return nil, err - } - } - return nodes, nil -} - -func (pq *PkgQuery) loadLocation(ctx context.Context, query *LocationQuery, nodes []*Pkg, init func(*Pkg), assign func(*Pkg, *Location)) error { - ids := make([]int, 0, len(nodes)) - nodeids := make(map[int][]*Pkg) - for i := range nodes { - fk := nodes[i].LocationID - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - if len(ids) == 0 { - return nil - } - query.Where(location.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return fmt.Errorf(`unexpected foreign-key "location_id" returned %v`, n.ID) - } - for i := range nodes { - assign(nodes[i], n) - } - } - return nil -} - -func (pq *PkgQuery) sqlCount(ctx context.Context) (int, error) { - _spec := pq.querySpec() - _spec.Node.Columns = pq.ctx.Fields - if len(pq.ctx.Fields) > 0 { - _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique - } - return sqlgraph.CountNodes(ctx, 
pq.driver, _spec) -} - -func (pq *PkgQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - _spec.From = pq.sql - if unique := pq.ctx.Unique; unique != nil { - _spec.Unique = *unique - } else if pq.path != nil { - _spec.Unique = true - } - if fields := pq.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, pkg.FieldID) - for i := range fields { - if fields[i] != pkg.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - if pq.withLocation != nil { - _spec.Node.AddColumnOnce(pkg.FieldLocationID) - } - } - if ps := pq.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := pq.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := pq.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := pq.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (pq *PkgQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(pq.driver.Dialect()) - t1 := builder.Table(pkg.Table) - columns := pq.ctx.Fields - if len(columns) == 0 { - columns = pkg.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if pq.sql != nil { - selector = pq.sql - selector.Select(selector.Columns(columns...)...) - } - if pq.ctx.Unique != nil && *pq.ctx.Unique { - selector.Distinct() - } - for _, p := range pq.predicates { - p(selector) - } - for _, p := range pq.order { - p(selector) - } - if offset := pq.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := pq.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// PkgGroupBy is the group-by builder for Pkg entities. -type PkgGroupBy struct { - selector - build *PkgQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (pgb *PkgGroupBy) Aggregate(fns ...AggregateFunc) *PkgGroupBy { - pgb.fns = append(pgb.fns, fns...) - return pgb -} - -// Scan applies the selector query and scans the result into the given value. -func (pgb *PkgGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, pgb.build.ctx, ent.OpQueryGroupBy) - if err := pgb.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*PkgQuery, *PkgGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) -} - -func (pgb *PkgGroupBy) sqlScan(ctx context.Context, root *PkgQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(pgb.fns)) - for _, fn := range pgb.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) - for _, f := range *pgb.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*pgb.flds...)...) 
- if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// PkgSelect is the builder for selecting fields of Pkg entities. -type PkgSelect struct { - *PkgQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (ps *PkgSelect) Aggregate(fns ...AggregateFunc) *PkgSelect { - ps.fns = append(ps.fns, fns...) - return ps -} - -// Scan applies the selector query and scans the result into the given value. -func (ps *PkgSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ps.ctx, ent.OpQuerySelect) - if err := ps.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*PkgQuery, *PkgSelect](ctx, ps.PkgQuery, ps, ps.inters, v) -} - -func (ps *PkgSelect) sqlScan(ctx context.Context, root *PkgQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(ps.fns)) - for _, fn := range ps.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*ps.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := ps.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/internal/storage/persistence/ent/db/pkg_update.go b/internal/storage/persistence/ent/db/pkg_update.go deleted file mode 100644 index 6de8f4f4f..000000000 --- a/internal/storage/persistence/ent/db/pkg_update.go +++ /dev/null @@ -1,460 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package db - -import ( - "context" - "errors" - "fmt" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/predicate" - "github.com/artefactual-sdps/enduro/internal/storage/types" - "github.com/google/uuid" -) - -// PkgUpdate is the builder for updating Pkg entities. -type PkgUpdate struct { - config - hooks []Hook - mutation *PkgMutation -} - -// Where appends a list predicates to the PkgUpdate builder. -func (pu *PkgUpdate) Where(ps ...predicate.Pkg) *PkgUpdate { - pu.mutation.Where(ps...) - return pu -} - -// SetName sets the "name" field. -func (pu *PkgUpdate) SetName(s string) *PkgUpdate { - pu.mutation.SetName(s) - return pu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableName(s *string) *PkgUpdate { - if s != nil { - pu.SetName(*s) - } - return pu -} - -// SetAipID sets the "aip_id" field. -func (pu *PkgUpdate) SetAipID(u uuid.UUID) *PkgUpdate { - pu.mutation.SetAipID(u) - return pu -} - -// SetNillableAipID sets the "aip_id" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableAipID(u *uuid.UUID) *PkgUpdate { - if u != nil { - pu.SetAipID(*u) - } - return pu -} - -// SetLocationID sets the "location_id" field. -func (pu *PkgUpdate) SetLocationID(i int) *PkgUpdate { - pu.mutation.SetLocationID(i) - return pu -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. 
-func (pu *PkgUpdate) SetNillableLocationID(i *int) *PkgUpdate { - if i != nil { - pu.SetLocationID(*i) - } - return pu -} - -// ClearLocationID clears the value of the "location_id" field. -func (pu *PkgUpdate) ClearLocationID() *PkgUpdate { - pu.mutation.ClearLocationID() - return pu -} - -// SetStatus sets the "status" field. -func (pu *PkgUpdate) SetStatus(ts types.PackageStatus) *PkgUpdate { - pu.mutation.SetStatus(ts) - return pu -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableStatus(ts *types.PackageStatus) *PkgUpdate { - if ts != nil { - pu.SetStatus(*ts) - } - return pu -} - -// SetObjectKey sets the "object_key" field. -func (pu *PkgUpdate) SetObjectKey(u uuid.UUID) *PkgUpdate { - pu.mutation.SetObjectKey(u) - return pu -} - -// SetNillableObjectKey sets the "object_key" field if the given value is not nil. -func (pu *PkgUpdate) SetNillableObjectKey(u *uuid.UUID) *PkgUpdate { - if u != nil { - pu.SetObjectKey(*u) - } - return pu -} - -// SetLocation sets the "location" edge to the Location entity. -func (pu *PkgUpdate) SetLocation(l *Location) *PkgUpdate { - return pu.SetLocationID(l.ID) -} - -// Mutation returns the PkgMutation object of the builder. -func (pu *PkgUpdate) Mutation() *PkgMutation { - return pu.mutation -} - -// ClearLocation clears the "location" edge to the Location entity. -func (pu *PkgUpdate) ClearLocation() *PkgUpdate { - pu.mutation.ClearLocation() - return pu -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (pu *PkgUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (pu *PkgUpdate) SaveX(ctx context.Context) int { - affected, err := pu.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. -func (pu *PkgUpdate) Exec(ctx context.Context) error { - _, err := pu.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (pu *PkgUpdate) ExecX(ctx context.Context) { - if err := pu.Exec(ctx); err != nil { - panic(err) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (pu *PkgUpdate) check() error { - if v, ok := pu.mutation.Status(); ok { - if err := pkg.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "Pkg.status": %w`, err)} - } - } - return nil -} - -func (pu *PkgUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := pu.check(); err != nil { - return n, err - } - _spec := sqlgraph.NewUpdateSpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - if ps := pu.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := pu.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - } - if value, ok := pu.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - } - if value, ok := pu.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeEnum, value) - } - if value, ok := pu.mutation.ObjectKey(); ok { - _spec.SetField(pkg.FieldObjectKey, field.TypeUUID, value) - } - if pu.mutation.LocationCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: pkg.LocationTable, - Columns: []string{pkg.LocationColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := pu.mutation.LocationIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: pkg.LocationTable, - Columns: []string{pkg.LocationColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{pkg.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - pu.mutation.done = true - return n, nil -} - -// PkgUpdateOne is the builder for updating a single Pkg entity. -type PkgUpdateOne struct { - config - fields []string - hooks []Hook - mutation *PkgMutation -} - -// SetName sets the "name" field. -func (puo *PkgUpdateOne) SetName(s string) *PkgUpdateOne { - puo.mutation.SetName(s) - return puo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableName(s *string) *PkgUpdateOne { - if s != nil { - puo.SetName(*s) - } - return puo -} - -// SetAipID sets the "aip_id" field. -func (puo *PkgUpdateOne) SetAipID(u uuid.UUID) *PkgUpdateOne { - puo.mutation.SetAipID(u) - return puo -} - -// SetNillableAipID sets the "aip_id" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableAipID(u *uuid.UUID) *PkgUpdateOne { - if u != nil { - puo.SetAipID(*u) - } - return puo -} - -// SetLocationID sets the "location_id" field. -func (puo *PkgUpdateOne) SetLocationID(i int) *PkgUpdateOne { - puo.mutation.SetLocationID(i) - return puo -} - -// SetNillableLocationID sets the "location_id" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableLocationID(i *int) *PkgUpdateOne { - if i != nil { - puo.SetLocationID(*i) - } - return puo -} - -// ClearLocationID clears the value of the "location_id" field. 
-func (puo *PkgUpdateOne) ClearLocationID() *PkgUpdateOne { - puo.mutation.ClearLocationID() - return puo -} - -// SetStatus sets the "status" field. -func (puo *PkgUpdateOne) SetStatus(ts types.PackageStatus) *PkgUpdateOne { - puo.mutation.SetStatus(ts) - return puo -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableStatus(ts *types.PackageStatus) *PkgUpdateOne { - if ts != nil { - puo.SetStatus(*ts) - } - return puo -} - -// SetObjectKey sets the "object_key" field. -func (puo *PkgUpdateOne) SetObjectKey(u uuid.UUID) *PkgUpdateOne { - puo.mutation.SetObjectKey(u) - return puo -} - -// SetNillableObjectKey sets the "object_key" field if the given value is not nil. -func (puo *PkgUpdateOne) SetNillableObjectKey(u *uuid.UUID) *PkgUpdateOne { - if u != nil { - puo.SetObjectKey(*u) - } - return puo -} - -// SetLocation sets the "location" edge to the Location entity. -func (puo *PkgUpdateOne) SetLocation(l *Location) *PkgUpdateOne { - return puo.SetLocationID(l.ID) -} - -// Mutation returns the PkgMutation object of the builder. -func (puo *PkgUpdateOne) Mutation() *PkgMutation { - return puo.mutation -} - -// ClearLocation clears the "location" edge to the Location entity. -func (puo *PkgUpdateOne) ClearLocation() *PkgUpdateOne { - puo.mutation.ClearLocation() - return puo -} - -// Where appends a list predicates to the PkgUpdate builder. -func (puo *PkgUpdateOne) Where(ps ...predicate.Pkg) *PkgUpdateOne { - puo.mutation.Where(ps...) - return puo -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (puo *PkgUpdateOne) Select(field string, fields ...string) *PkgUpdateOne { - puo.fields = append([]string{field}, fields...) - return puo -} - -// Save executes the query and returns the updated Pkg entity. -func (puo *PkgUpdateOne) Save(ctx context.Context) (*Pkg, error) { - return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (puo *PkgUpdateOne) SaveX(ctx context.Context) *Pkg { - node, err := puo.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (puo *PkgUpdateOne) Exec(ctx context.Context) error { - _, err := puo.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (puo *PkgUpdateOne) ExecX(ctx context.Context) { - if err := puo.Exec(ctx); err != nil { - panic(err) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (puo *PkgUpdateOne) check() error { - if v, ok := puo.mutation.Status(); ok { - if err := pkg.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "Pkg.status": %w`, err)} - } - } - return nil -} - -func (puo *PkgUpdateOne) sqlSave(ctx context.Context) (_node *Pkg, err error) { - if err := puo.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(pkg.Table, pkg.Columns, sqlgraph.NewFieldSpec(pkg.FieldID, field.TypeInt)) - id, ok := puo.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "Pkg.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := puo.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, pkg.FieldID) - for _, f := range fields { - if !pkg.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} - } - if f != pkg.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := puo.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := puo.mutation.Name(); ok { - _spec.SetField(pkg.FieldName, field.TypeString, value) - } - if value, ok := puo.mutation.AipID(); ok { - _spec.SetField(pkg.FieldAipID, field.TypeUUID, value) - } - if value, ok := puo.mutation.Status(); ok { - _spec.SetField(pkg.FieldStatus, field.TypeEnum, value) - } - if value, ok := puo.mutation.ObjectKey(); ok { - _spec.SetField(pkg.FieldObjectKey, field.TypeUUID, value) - } - if puo.mutation.LocationCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: pkg.LocationTable, - Columns: []string{pkg.LocationColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := puo.mutation.LocationIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: pkg.LocationTable, - Columns: []string{pkg.LocationColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - _node = &Pkg{config: puo.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{pkg.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - puo.mutation.done = true - return _node, nil -} diff --git a/internal/storage/persistence/ent/db/predicate/predicate.go b/internal/storage/persistence/ent/db/predicate/predicate.go index 5409d2a0b..fecc5ef32 100644 --- a/internal/storage/persistence/ent/db/predicate/predicate.go +++ b/internal/storage/persistence/ent/db/predicate/predicate.go @@ -6,8 +6,8 @@ import ( "entgo.io/ent/dialect/sql" ) +// AIP is the predicate function for aip builders. +type AIP func(*sql.Selector) + // Location is the predicate function for location builders. type Location func(*sql.Selector) - -// Pkg is the predicate function for pkg builders. 
-type Pkg func(*sql.Selector) diff --git a/internal/storage/persistence/ent/db/runtime.go b/internal/storage/persistence/ent/db/runtime.go index 4b6647d29..c91a521ac 100644 --- a/internal/storage/persistence/ent/db/runtime.go +++ b/internal/storage/persistence/ent/db/runtime.go @@ -5,8 +5,8 @@ package db import ( "time" + "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/location" - "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/pkg" "github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/schema" ) @@ -14,16 +14,16 @@ import ( // (default values, validators, hooks and policies) and stitches it // to their package variables. func init() { + aipFields := schema.AIP{}.Fields() + _ = aipFields + // aipDescCreatedAt is the schema descriptor for created_at field. + aipDescCreatedAt := aipFields[5].Descriptor() + // aip.DefaultCreatedAt holds the default value on creation for the created_at field. + aip.DefaultCreatedAt = aipDescCreatedAt.Default.(func() time.Time) locationFields := schema.Location{}.Fields() _ = locationFields // locationDescCreatedAt is the schema descriptor for created_at field. locationDescCreatedAt := locationFields[6].Descriptor() // location.DefaultCreatedAt holds the default value on creation for the created_at field. location.DefaultCreatedAt = locationDescCreatedAt.Default.(func() time.Time) - pkgFields := schema.Pkg{}.Fields() - _ = pkgFields - // pkgDescCreatedAt is the schema descriptor for created_at field. - pkgDescCreatedAt := pkgFields[5].Descriptor() - // pkg.DefaultCreatedAt holds the default value on creation for the created_at field. - pkg.DefaultCreatedAt = pkgDescCreatedAt.Default.(func() time.Time) } diff --git a/internal/storage/persistence/ent/db/tx.go b/internal/storage/persistence/ent/db/tx.go index 9d498754b..f4171c56a 100644 --- a/internal/storage/persistence/ent/db/tx.go +++ b/internal/storage/persistence/ent/db/tx.go @@ -12,10 +12,10 @@ import ( // Tx is a transactional client that is created by calling Client.Tx(). type Tx struct { config + // AIP is the client for interacting with the AIP builders. + AIP *AIPClient // Location is the client for interacting with the Location builders. Location *LocationClient - // Pkg is the client for interacting with the Pkg builders. - Pkg *PkgClient // lazily loaded. client *Client @@ -147,8 +147,8 @@ func (tx *Tx) Client() *Client { } func (tx *Tx) init() { + tx.AIP = NewAIPClient(tx.config) tx.Location = NewLocationClient(tx.config) - tx.Pkg = NewPkgClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. @@ -158,7 +158,7 @@ func (tx *Tx) init() { // of them in order to commit or rollback the transaction. // // If a closed transaction is embedded in one of the generated entities, and the entity -// applies a query, for example: Location.QueryXXX(), the query will be executed +// applies a query, for example: AIP.QueryXXX(), the query will be executed // through the driver which created this transaction. // // Note that txDriver is not goroutine safe. 
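The regenerated storage client above replaces the Pkg builders with AIP builders, so transactional callers now go through Tx.AIP instead of Tx.Pkg. The following is a minimal sketch of what that looks like from calling code; it assumes Ent's standard generated API (Client.Tx, AIPQuery, Only, and the aip predicate package), since only Tx.AIP and NewAIPClient appear in the hunks above.

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db"
	"github.com/artefactual-sdps/enduro/internal/storage/persistence/ent/db/aip"
)

// readAIP fetches a single AIP by its aip_id inside a transaction, using the
// renamed Tx.AIP client (formerly Tx.Pkg).
func readAIP(ctx context.Context, client *db.Client, aipID uuid.UUID) (*db.AIP, error) {
	tx, err := client.Tx(ctx)
	if err != nil {
		return nil, err
	}
	a, err := tx.AIP.Query().
		// aip.AipID is the generated equality predicate for the aip_id field;
		// the name follows Ent's conventions and is illustrative, not taken from the diff.
		Where(aip.AipID(aipID)).
		Only(ctx)
	if err != nil {
		_ = tx.Rollback()
		return nil, err
	}
	return a, tx.Commit()
}

The rest of the generated db package (the query, create, update, and delete builders) follows the same one-for-one rename from Pkg* to AIP*.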
diff --git a/internal/storage/persistence/ent/schema/pkg.go b/internal/storage/persistence/ent/schema/aip.go similarity index 68% rename from internal/storage/persistence/ent/schema/pkg.go rename to internal/storage/persistence/ent/schema/aip.go index 9015f3da4..6fa7f3975 100644 --- a/internal/storage/persistence/ent/schema/pkg.go +++ b/internal/storage/persistence/ent/schema/aip.go @@ -14,20 +14,20 @@ import ( "github.com/artefactual-sdps/enduro/internal/storage/types" ) -// Pkg holds the schema definition for the Pkg entity. -type Pkg struct { +// AIP holds the schema definition for the AIP entity. +type AIP struct { ent.Schema } -// Annotations of the Pkg. -func (Pkg) Annotations() []schema.Annotation { +// Annotations of the AIP. +func (AIP) Annotations() []schema.Annotation { return []schema.Annotation{ - entsql.Annotation{Table: "package"}, + entsql.Annotation{Table: "aip"}, } } -// Fields of the Pkg. -func (Pkg) Fields() []ent.Field { +// Fields of the AIP. +func (AIP) Fields() []ent.Field { return []ent.Field{ field.String("name"). Annotations(entsql.Annotation{ @@ -38,7 +38,7 @@ func (Pkg) Fields() []ent.Field { field.Int("location_id"). Optional(), field.Enum("status"). - GoType(types.StatusUnspecified), + GoType(types.AIPStatusUnspecified), field.UUID("object_key", uuid.UUID{}). Unique(), field.Time("created_at"). @@ -47,8 +47,8 @@ func (Pkg) Fields() []ent.Field { } } -// Edges of the Pkg. -func (Pkg) Edges() []ent.Edge { +// Edges of the AIP. +func (AIP) Edges() []ent.Edge { return []ent.Edge{ edge.To("location", Location.Type). Field("location_id"). @@ -56,8 +56,8 @@ func (Pkg) Edges() []ent.Edge { } } -// Indexes of the Pkg. -func (Pkg) Indexes() []ent.Index { +// Indexes of the AIP. +func (AIP) Indexes() []ent.Index { return []ent.Index{ index.Fields("aip_id"), index.Fields("object_key"), diff --git a/internal/storage/persistence/ent/schema/location.go b/internal/storage/persistence/ent/schema/location.go index b100e51c4..1c087bf83 100644 --- a/internal/storage/persistence/ent/schema/location.go +++ b/internal/storage/persistence/ent/schema/location.go @@ -53,7 +53,7 @@ func (Location) Fields() []ent.Field { // Edges of the Location. func (Location) Edges() []ent.Edge { return []ent.Edge{ - edge.From("packages", Pkg.Type). + edge.From("aips", AIP.Type). Ref("location"), } } diff --git a/internal/storage/persistence/fake/mock_persistence.go b/internal/storage/persistence/fake/mock_persistence.go index df90ca371..4b7a7b2c0 100644 --- a/internal/storage/persistence/fake/mock_persistence.go +++ b/internal/storage/persistence/fake/mock_persistence.go @@ -42,6 +42,45 @@ func (m *MockStorage) EXPECT() *MockStorageMockRecorder { return m.recorder } +// CreateAIP mocks base method. +func (m *MockStorage) CreateAIP(arg0 context.Context, arg1 *storage.Package) (*storage.Package, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAIP", arg0, arg1) + ret0, _ := ret[0].(*storage.Package) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAIP indicates an expected call of CreateAIP. 
+func (mr *MockStorageMockRecorder) CreateAIP(arg0, arg1 any) *MockStorageCreateAIPCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAIP", reflect.TypeOf((*MockStorage)(nil).CreateAIP), arg0, arg1) + return &MockStorageCreateAIPCall{Call: call} +} + +// MockStorageCreateAIPCall wrap *gomock.Call +type MockStorageCreateAIPCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStorageCreateAIPCall) Return(arg0 *storage.Package, arg1 error) *MockStorageCreateAIPCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStorageCreateAIPCall) Do(f func(context.Context, *storage.Package) (*storage.Package, error)) *MockStorageCreateAIPCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStorageCreateAIPCall) DoAndReturn(f func(context.Context, *storage.Package) (*storage.Package, error)) *MockStorageCreateAIPCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // CreateLocation mocks base method. func (m *MockStorage) CreateLocation(arg0 context.Context, arg1 *storage.Location, arg2 *types.LocationConfig) (*storage.Location, error) { m.ctrl.T.Helper() @@ -81,41 +120,41 @@ func (c *MockStorageCreateLocationCall) DoAndReturn(f func(context.Context, *sto return c } -// CreatePackage mocks base method. -func (m *MockStorage) CreatePackage(arg0 context.Context, arg1 *storage.Package) (*storage.Package, error) { +// ListAIPs mocks base method. +func (m *MockStorage) ListAIPs(arg0 context.Context) (storage.PackageCollection, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreatePackage", arg0, arg1) - ret0, _ := ret[0].(*storage.Package) + ret := m.ctrl.Call(m, "ListAIPs", arg0) + ret0, _ := ret[0].(storage.PackageCollection) ret1, _ := ret[1].(error) return ret0, ret1 } -// CreatePackage indicates an expected call of CreatePackage. -func (mr *MockStorageMockRecorder) CreatePackage(arg0, arg1 any) *MockStorageCreatePackageCall { +// ListAIPs indicates an expected call of ListAIPs. 
+func (mr *MockStorageMockRecorder) ListAIPs(arg0 any) *MockStorageListAIPsCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePackage", reflect.TypeOf((*MockStorage)(nil).CreatePackage), arg0, arg1) - return &MockStorageCreatePackageCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAIPs", reflect.TypeOf((*MockStorage)(nil).ListAIPs), arg0) + return &MockStorageListAIPsCall{Call: call} } -// MockStorageCreatePackageCall wrap *gomock.Call -type MockStorageCreatePackageCall struct { +// MockStorageListAIPsCall wrap *gomock.Call +type MockStorageListAIPsCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockStorageCreatePackageCall) Return(arg0 *storage.Package, arg1 error) *MockStorageCreatePackageCall { +func (c *MockStorageListAIPsCall) Return(arg0 storage.PackageCollection, arg1 error) *MockStorageListAIPsCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockStorageCreatePackageCall) Do(f func(context.Context, *storage.Package) (*storage.Package, error)) *MockStorageCreatePackageCall { +func (c *MockStorageListAIPsCall) Do(f func(context.Context) (storage.PackageCollection, error)) *MockStorageListAIPsCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageCreatePackageCall) DoAndReturn(f func(context.Context, *storage.Package) (*storage.Package, error)) *MockStorageCreatePackageCall { +func (c *MockStorageListAIPsCall) DoAndReturn(f func(context.Context) (storage.PackageCollection, error)) *MockStorageListAIPsCall { c.Call = c.Call.DoAndReturn(f) return c } @@ -159,80 +198,80 @@ func (c *MockStorageListLocationsCall) DoAndReturn(f func(context.Context) (stor return c } -// ListPackages mocks base method. -func (m *MockStorage) ListPackages(arg0 context.Context) (storage.PackageCollection, error) { +// LocationAIPs mocks base method. +func (m *MockStorage) LocationAIPs(arg0 context.Context, arg1 uuid.UUID) (storage.PackageCollection, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPackages", arg0) + ret := m.ctrl.Call(m, "LocationAIPs", arg0, arg1) ret0, _ := ret[0].(storage.PackageCollection) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListPackages indicates an expected call of ListPackages. -func (mr *MockStorageMockRecorder) ListPackages(arg0 any) *MockStorageListPackagesCall { +// LocationAIPs indicates an expected call of LocationAIPs. 
+func (mr *MockStorageMockRecorder) LocationAIPs(arg0, arg1 any) *MockStorageLocationAIPsCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPackages", reflect.TypeOf((*MockStorage)(nil).ListPackages), arg0) - return &MockStorageListPackagesCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocationAIPs", reflect.TypeOf((*MockStorage)(nil).LocationAIPs), arg0, arg1) + return &MockStorageLocationAIPsCall{Call: call} } -// MockStorageListPackagesCall wrap *gomock.Call -type MockStorageListPackagesCall struct { +// MockStorageLocationAIPsCall wrap *gomock.Call +type MockStorageLocationAIPsCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockStorageListPackagesCall) Return(arg0 storage.PackageCollection, arg1 error) *MockStorageListPackagesCall { +func (c *MockStorageLocationAIPsCall) Return(arg0 storage.PackageCollection, arg1 error) *MockStorageLocationAIPsCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockStorageListPackagesCall) Do(f func(context.Context) (storage.PackageCollection, error)) *MockStorageListPackagesCall { +func (c *MockStorageLocationAIPsCall) Do(f func(context.Context, uuid.UUID) (storage.PackageCollection, error)) *MockStorageLocationAIPsCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageListPackagesCall) DoAndReturn(f func(context.Context) (storage.PackageCollection, error)) *MockStorageListPackagesCall { +func (c *MockStorageLocationAIPsCall) DoAndReturn(f func(context.Context, uuid.UUID) (storage.PackageCollection, error)) *MockStorageLocationAIPsCall { c.Call = c.Call.DoAndReturn(f) return c } -// LocationPackages mocks base method. -func (m *MockStorage) LocationPackages(arg0 context.Context, arg1 uuid.UUID) (storage.PackageCollection, error) { +// ReadAIP mocks base method. +func (m *MockStorage) ReadAIP(arg0 context.Context, arg1 uuid.UUID) (*storage.Package, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LocationPackages", arg0, arg1) - ret0, _ := ret[0].(storage.PackageCollection) + ret := m.ctrl.Call(m, "ReadAIP", arg0, arg1) + ret0, _ := ret[0].(*storage.Package) ret1, _ := ret[1].(error) return ret0, ret1 } -// LocationPackages indicates an expected call of LocationPackages. -func (mr *MockStorageMockRecorder) LocationPackages(arg0, arg1 any) *MockStorageLocationPackagesCall { +// ReadAIP indicates an expected call of ReadAIP. 
+func (mr *MockStorageMockRecorder) ReadAIP(arg0, arg1 any) *MockStorageReadAIPCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocationPackages", reflect.TypeOf((*MockStorage)(nil).LocationPackages), arg0, arg1) - return &MockStorageLocationPackagesCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadAIP", reflect.TypeOf((*MockStorage)(nil).ReadAIP), arg0, arg1) + return &MockStorageReadAIPCall{Call: call} } -// MockStorageLocationPackagesCall wrap *gomock.Call -type MockStorageLocationPackagesCall struct { +// MockStorageReadAIPCall wrap *gomock.Call +type MockStorageReadAIPCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockStorageLocationPackagesCall) Return(arg0 storage.PackageCollection, arg1 error) *MockStorageLocationPackagesCall { +func (c *MockStorageReadAIPCall) Return(arg0 *storage.Package, arg1 error) *MockStorageReadAIPCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockStorageLocationPackagesCall) Do(f func(context.Context, uuid.UUID) (storage.PackageCollection, error)) *MockStorageLocationPackagesCall { +func (c *MockStorageReadAIPCall) Do(f func(context.Context, uuid.UUID) (*storage.Package, error)) *MockStorageReadAIPCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageLocationPackagesCall) DoAndReturn(f func(context.Context, uuid.UUID) (storage.PackageCollection, error)) *MockStorageLocationPackagesCall { +func (c *MockStorageReadAIPCall) DoAndReturn(f func(context.Context, uuid.UUID) (*storage.Package, error)) *MockStorageReadAIPCall { c.Call = c.Call.DoAndReturn(f) return c } @@ -276,117 +315,78 @@ func (c *MockStorageReadLocationCall) DoAndReturn(f func(context.Context, uuid.U return c } -// ReadPackage mocks base method. -func (m *MockStorage) ReadPackage(arg0 context.Context, arg1 uuid.UUID) (*storage.Package, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadPackage", arg0, arg1) - ret0, _ := ret[0].(*storage.Package) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadPackage indicates an expected call of ReadPackage. -func (mr *MockStorageMockRecorder) ReadPackage(arg0, arg1 any) *MockStorageReadPackageCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadPackage", reflect.TypeOf((*MockStorage)(nil).ReadPackage), arg0, arg1) - return &MockStorageReadPackageCall{Call: call} -} - -// MockStorageReadPackageCall wrap *gomock.Call -type MockStorageReadPackageCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockStorageReadPackageCall) Return(arg0 *storage.Package, arg1 error) *MockStorageReadPackageCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockStorageReadPackageCall) Do(f func(context.Context, uuid.UUID) (*storage.Package, error)) *MockStorageReadPackageCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageReadPackageCall) DoAndReturn(f func(context.Context, uuid.UUID) (*storage.Package, error)) *MockStorageReadPackageCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// UpdatePackageLocationID mocks base method. -func (m *MockStorage) UpdatePackageLocationID(arg0 context.Context, arg1, arg2 uuid.UUID) error { +// UpdateAIPLocationID mocks base method. 
+func (m *MockStorage) UpdateAIPLocationID(arg0 context.Context, arg1, arg2 uuid.UUID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdatePackageLocationID", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "UpdateAIPLocationID", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } -// UpdatePackageLocationID indicates an expected call of UpdatePackageLocationID. -func (mr *MockStorageMockRecorder) UpdatePackageLocationID(arg0, arg1, arg2 any) *MockStorageUpdatePackageLocationIDCall { +// UpdateAIPLocationID indicates an expected call of UpdateAIPLocationID. +func (mr *MockStorageMockRecorder) UpdateAIPLocationID(arg0, arg1, arg2 any) *MockStorageUpdateAIPLocationIDCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePackageLocationID", reflect.TypeOf((*MockStorage)(nil).UpdatePackageLocationID), arg0, arg1, arg2) - return &MockStorageUpdatePackageLocationIDCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAIPLocationID", reflect.TypeOf((*MockStorage)(nil).UpdateAIPLocationID), arg0, arg1, arg2) + return &MockStorageUpdateAIPLocationIDCall{Call: call} } -// MockStorageUpdatePackageLocationIDCall wrap *gomock.Call -type MockStorageUpdatePackageLocationIDCall struct { +// MockStorageUpdateAIPLocationIDCall wrap *gomock.Call +type MockStorageUpdateAIPLocationIDCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockStorageUpdatePackageLocationIDCall) Return(arg0 error) *MockStorageUpdatePackageLocationIDCall { +func (c *MockStorageUpdateAIPLocationIDCall) Return(arg0 error) *MockStorageUpdateAIPLocationIDCall { c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockStorageUpdatePackageLocationIDCall) Do(f func(context.Context, uuid.UUID, uuid.UUID) error) *MockStorageUpdatePackageLocationIDCall { +func (c *MockStorageUpdateAIPLocationIDCall) Do(f func(context.Context, uuid.UUID, uuid.UUID) error) *MockStorageUpdateAIPLocationIDCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageUpdatePackageLocationIDCall) DoAndReturn(f func(context.Context, uuid.UUID, uuid.UUID) error) *MockStorageUpdatePackageLocationIDCall { +func (c *MockStorageUpdateAIPLocationIDCall) DoAndReturn(f func(context.Context, uuid.UUID, uuid.UUID) error) *MockStorageUpdateAIPLocationIDCall { c.Call = c.Call.DoAndReturn(f) return c } -// UpdatePackageStatus mocks base method. -func (m *MockStorage) UpdatePackageStatus(arg0 context.Context, arg1 uuid.UUID, arg2 types.PackageStatus) error { +// UpdateAIPStatus mocks base method. +func (m *MockStorage) UpdateAIPStatus(arg0 context.Context, arg1 uuid.UUID, arg2 types.AIPStatus) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdatePackageStatus", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "UpdateAIPStatus", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } -// UpdatePackageStatus indicates an expected call of UpdatePackageStatus. -func (mr *MockStorageMockRecorder) UpdatePackageStatus(arg0, arg1, arg2 any) *MockStorageUpdatePackageStatusCall { +// UpdateAIPStatus indicates an expected call of UpdateAIPStatus. 
+func (mr *MockStorageMockRecorder) UpdateAIPStatus(arg0, arg1, arg2 any) *MockStorageUpdateAIPStatusCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePackageStatus", reflect.TypeOf((*MockStorage)(nil).UpdatePackageStatus), arg0, arg1, arg2) - return &MockStorageUpdatePackageStatusCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAIPStatus", reflect.TypeOf((*MockStorage)(nil).UpdateAIPStatus), arg0, arg1, arg2) + return &MockStorageUpdateAIPStatusCall{Call: call} } -// MockStorageUpdatePackageStatusCall wrap *gomock.Call -type MockStorageUpdatePackageStatusCall struct { +// MockStorageUpdateAIPStatusCall wrap *gomock.Call +type MockStorageUpdateAIPStatusCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockStorageUpdatePackageStatusCall) Return(arg0 error) *MockStorageUpdatePackageStatusCall { +func (c *MockStorageUpdateAIPStatusCall) Return(arg0 error) *MockStorageUpdateAIPStatusCall { c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockStorageUpdatePackageStatusCall) Do(f func(context.Context, uuid.UUID, types.PackageStatus) error) *MockStorageUpdatePackageStatusCall { +func (c *MockStorageUpdateAIPStatusCall) Do(f func(context.Context, uuid.UUID, types.AIPStatus) error) *MockStorageUpdateAIPStatusCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockStorageUpdatePackageStatusCall) DoAndReturn(f func(context.Context, uuid.UUID, types.PackageStatus) error) *MockStorageUpdatePackageStatusCall { +func (c *MockStorageUpdateAIPStatusCall) DoAndReturn(f func(context.Context, uuid.UUID, types.AIPStatus) error) *MockStorageUpdateAIPStatusCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/internal/storage/persistence/migrations/20250211024602_rename_package_table.down.sql b/internal/storage/persistence/migrations/20250211024602_rename_package_table.down.sql new file mode 100644 index 000000000..38be8bbd9 --- /dev/null +++ b/internal/storage/persistence/migrations/20250211024602_rename_package_table.down.sql @@ -0,0 +1,4 @@ +-- reverse: modify "aip" table +ALTER TABLE `aip` DROP FOREIGN KEY `aip_location_location`, ADD CONSTRAINT `package_location_location` FOREIGN KEY (`location_id`) REFERENCES `location` (`id`) ON UPDATE NO ACTION ON DELETE SET NULL, DROP INDEX `aip_object_key`, DROP INDEX `aip_location_location`, DROP INDEX `aip_aip_id`, ADD INDEX `pkg_object_key` (`object_key`), ADD INDEX `pkg_aip_id` (`aip_id`); +-- reverse: rename "package" table +RENAME TABLE `aip` TO `package`; diff --git a/internal/storage/persistence/migrations/20250211024602_rename_package_table.up.sql b/internal/storage/persistence/migrations/20250211024602_rename_package_table.up.sql new file mode 100644 index 000000000..486054d0f --- /dev/null +++ b/internal/storage/persistence/migrations/20250211024602_rename_package_table.up.sql @@ -0,0 +1,4 @@ +-- rename "package" table +RENAME TABLE `package` TO `aip`; +-- modify "aip" table +ALTER TABLE `aip` DROP INDEX `pkg_aip_id`, DROP INDEX `pkg_object_key`, ADD INDEX `aip_aip_id` (`aip_id`), ADD INDEX `aip_location_location` (`location_id`), ADD INDEX `aip_object_key` (`object_key`), DROP FOREIGN KEY `package_location_location`, ADD CONSTRAINT `aip_location_location` FOREIGN KEY (`location_id`) REFERENCES `location` (`id`) ON UPDATE NO ACTION ON DELETE SET NULL; diff --git a/internal/storage/persistence/migrations/atlas.sum b/internal/storage/persistence/migrations/atlas.sum index 
64ecc3399..315560ace 100644 --- a/internal/storage/persistence/migrations/atlas.sum +++ b/internal/storage/persistence/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:nJnX1xFmPcxNh6rNCHZKgvIZEqrg1RFMXmK0l6lADyo= +h1:cw69TQvyChgb5KODW60VY81hc9od/PI8/9N3SAIaVlE= 20220818175139_init.down.sql h1:SU3ri3mnbAqcubHMc09bvtO1fHwsmWf0Fv3zO32TIKE= 20220818175139_init.up.sql h1:L99Oh5xRGKLr5aV9GfKdP0Wz9hMJ2jGHwdNdFYYDcj4= 20220819155618_location_config.down.sql h1:YHBToXRroeebB14Oi6BV6Ts8urWNUBIkSkt879Ant9o= @@ -9,3 +9,5 @@ h1:nJnX1xFmPcxNh6rNCHZKgvIZEqrg1RFMXmK0l6lADyo= 20221003175305_changes.up.sql h1:QMZvNsU//nSNvK53zS2eSkycCIjcUwHTcM7cVDjNk/g= 20240507234842_location_source_amss.down.sql h1:T0u74zZVsm9wI7zPECVMbwKFL4ZNO0so+CFPHxols+I= 20240507234842_location_source_amss.up.sql h1:oBjmqn1bdo3RkOT/uXi3PwsYxvufpwkkOAtRS2Fpk8M= +20250211024602_rename_package_table.down.sql h1:zk3r3cXipyGpj/fY2Cnh7vEQNmgEB/VxIGsH/LVrU+4= +20250211024602_rename_package_table.up.sql h1:kvk247jrt4aA2sMZ7bHreqKMEcc+Vywupv3qhNebn8I= diff --git a/internal/storage/persistence/persistence.go b/internal/storage/persistence/persistence.go index 0bb77120b..1d9830a9c 100644 --- a/internal/storage/persistence/persistence.go +++ b/internal/storage/persistence/persistence.go @@ -10,12 +10,12 @@ import ( ) type Storage interface { - // Package. - CreatePackage(ctx context.Context, pkg *goastorage.Package) (*goastorage.Package, error) - ListPackages(ctx context.Context) (goastorage.PackageCollection, error) - ReadPackage(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) - UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.PackageStatus) error - UpdatePackageLocationID(ctx context.Context, aipID, locationID uuid.UUID) error + // AIP. + CreateAIP(ctx context.Context, pkg *goastorage.Package) (*goastorage.Package, error) + ListAIPs(ctx context.Context) (goastorage.PackageCollection, error) + ReadAIP(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) + UpdateAIPStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error + UpdateAIPLocationID(ctx context.Context, aipID, locationID uuid.UUID) error // Location. 
CreateLocation( @@ -25,5 +25,5 @@ type Storage interface { ) (*goastorage.Location, error) ListLocations(ctx context.Context) (goastorage.LocationCollection, error) ReadLocation(ctx context.Context, locationID uuid.UUID) (*goastorage.Location, error) - LocationPackages(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) + LocationAIPs(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) } diff --git a/internal/storage/persistence/telemetry.go b/internal/storage/persistence/telemetry.go index 09e7211c0..a3466a279 100644 --- a/internal/storage/persistence/telemetry.go +++ b/internal/storage/persistence/telemetry.go @@ -32,66 +32,66 @@ func updateError(err error, name string) error { return fmt.Errorf("%s: %w", name, err) } -func (w *wrapper) CreatePackage(ctx context.Context, pkg *goastorage.Package) (*goastorage.Package, error) { - ctx, span := w.tracer.Start(ctx, "CreatePackage") +func (w *wrapper) CreateAIP(ctx context.Context, pkg *goastorage.Package) (*goastorage.Package, error) { + ctx, span := w.tracer.Start(ctx, "CreateAIP") defer span.End() - r, err := w.wrapped.CreatePackage(ctx, pkg) + r, err := w.wrapped.CreateAIP(ctx, pkg) if err != nil { telemetry.RecordError(span, err) - return nil, updateError(err, "CreatePackage") + return nil, updateError(err, "CreateAIP") } return r, nil } -func (w *wrapper) ListPackages(ctx context.Context) (goastorage.PackageCollection, error) { - ctx, span := w.tracer.Start(ctx, "ListPackages") +func (w *wrapper) ListAIPs(ctx context.Context) (goastorage.PackageCollection, error) { + ctx, span := w.tracer.Start(ctx, "ListAIPs") defer span.End() - r, err := w.wrapped.ListPackages(ctx) + r, err := w.wrapped.ListAIPs(ctx) if err != nil { telemetry.RecordError(span, err) - return nil, updateError(err, "ListPackages") + return nil, updateError(err, "ListAIPs") } return r, nil } -func (w *wrapper) ReadPackage(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) { - ctx, span := w.tracer.Start(ctx, "ReadPackage") +func (w *wrapper) ReadAIP(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) { + ctx, span := w.tracer.Start(ctx, "ReadAIP") defer span.End() - r, err := w.wrapped.ReadPackage(ctx, aipID) + r, err := w.wrapped.ReadAIP(ctx, aipID) if err != nil { telemetry.RecordError(span, err) - return nil, updateError(err, "ReadPackage") + return nil, updateError(err, "ReadAIP") } return r, nil } -func (w *wrapper) UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.PackageStatus) error { - ctx, span := w.tracer.Start(ctx, "UpdatePackageStatus") +func (w *wrapper) UpdateAIPStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error { + ctx, span := w.tracer.Start(ctx, "UpdateAIPStatus") defer span.End() - err := w.wrapped.UpdatePackageStatus(ctx, aipID, status) + err := w.wrapped.UpdateAIPStatus(ctx, aipID, status) if err != nil { telemetry.RecordError(span, err) - return updateError(err, "UpdatePackageStatus") + return updateError(err, "UpdateAIPStatus") } return nil } -func (w *wrapper) UpdatePackageLocationID(ctx context.Context, aipID, locationID uuid.UUID) error { - ctx, span := w.tracer.Start(ctx, "UpdatePackageLocationID") +func (w *wrapper) UpdateAIPLocationID(ctx context.Context, aipID, locationID uuid.UUID) error { + ctx, span := w.tracer.Start(ctx, "UpdateAIPLocationID") defer span.End() - err := w.wrapped.UpdatePackageLocationID(ctx, aipID, locationID) + err := w.wrapped.UpdateAIPLocationID(ctx, aipID, locationID) if err != nil { 
telemetry.RecordError(span, err) - return updateError(err, "UpdatePackageLocationID") + return updateError(err, "UpdateAIPLocationID") } return nil @@ -140,14 +140,14 @@ func (w *wrapper) ReadLocation(ctx context.Context, locationID uuid.UUID) (*goas return r, nil } -func (w *wrapper) LocationPackages(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) { - ctx, span := w.tracer.Start(ctx, "LocationPackages") +func (w *wrapper) LocationAIPs(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error) { + ctx, span := w.tracer.Start(ctx, "LocationAIPs") defer span.End() - r, err := w.wrapped.LocationPackages(ctx, locationID) + r, err := w.wrapped.LocationAIPs(ctx, locationID) if err != nil { telemetry.RecordError(span, err) - return nil, updateError(err, "LocationPackages") + return nil, updateError(err, "LocationAIPs") } return r, nil diff --git a/internal/storage/service.go b/internal/storage/service.go index a47227390..315acaa59 100644 --- a/internal/storage/service.go +++ b/internal/storage/service.go @@ -31,7 +31,7 @@ type Service interface { // Used from workflow activities. Location(ctx context.Context, locationID uuid.UUID) (Location, error) ReadPackage(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) - UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.PackageStatus) error + UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error UpdatePackageLocationID(ctx context.Context, aipID, locationID uuid.UUID) error Delete(ctx context.Context, aipID uuid.UUID) (err error) @@ -148,7 +148,7 @@ func (s *serviceImpl) Submit(ctx context.Context, payload *goastorage.SubmitPayl objectKey := uuid.Must(uuid.NewRandomFromReader(s.rander)) - _, err = s.storagePersistence.CreatePackage(ctx, &goastorage.Package{ + _, err = s.storagePersistence.CreateAIP(ctx, &goastorage.Package{ Name: payload.Name, AipID: aipID, ObjectKey: objectKey, @@ -197,7 +197,7 @@ func (s *serviceImpl) Create(ctx context.Context, payload *goastorage.CreatePayl LocationID: payload.LocationID, } - return s.storagePersistence.CreatePackage(ctx, p) + return s.storagePersistence.CreateAIP(ctx, p) } func (s *serviceImpl) Update(ctx context.Context, payload *goastorage.UpdatePayload) error { @@ -213,7 +213,7 @@ func (s *serviceImpl) Update(ctx context.Context, payload *goastorage.UpdatePayl return goastorage.MakeNotAvailable(errors.New("cannot perform operation")) } // Update the package status to in_review - err = s.UpdatePackageStatus(ctx, aipID, types.StatusInReview) + err = s.UpdatePackageStatus(ctx, aipID, types.AIPStatusInReview) if err != nil { return goastorage.MakeNotValid(errors.New("cannot update package status")) } @@ -300,7 +300,7 @@ func (s *serviceImpl) Reject(ctx context.Context, payload *goastorage.RejectPayl return goastorage.MakeNotValid(errors.New("cannot perform operation")) } - return s.UpdatePackageStatus(ctx, aipID, types.StatusRejected) + return s.UpdatePackageStatus(ctx, aipID, types.AIPStatusRejected) } func (s *serviceImpl) Show(ctx context.Context, payload *goastorage.ShowPayload) (*goastorage.Package, error) { @@ -313,15 +313,15 @@ func (s *serviceImpl) Show(ctx context.Context, payload *goastorage.ShowPayload) } func (s *serviceImpl) ReadPackage(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error) { - return s.storagePersistence.ReadPackage(ctx, aipID) + return s.storagePersistence.ReadAIP(ctx, aipID) } -func (s *serviceImpl) UpdatePackageStatus(ctx context.Context, aipID 
uuid.UUID, status types.PackageStatus) error { - return s.storagePersistence.UpdatePackageStatus(ctx, aipID, status) +func (s *serviceImpl) UpdatePackageStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error { + return s.storagePersistence.UpdateAIPStatus(ctx, aipID, status) } func (s *serviceImpl) UpdatePackageLocationID(ctx context.Context, aipID, locationID uuid.UUID) error { - return s.storagePersistence.UpdatePackageLocationID(ctx, aipID, locationID) + return s.storagePersistence.UpdateAIPLocationID(ctx, aipID, locationID) } // packageLocation returns the bucket and the key of the given package. @@ -445,7 +445,7 @@ func (s *serviceImpl) LocationPackages( return nil, goastorage.MakeNotValid(errors.New("cannot perform operation")) } - pkgs, err := s.storagePersistence.LocationPackages(ctx, locationID) + pkgs, err := s.storagePersistence.LocationAIPs(ctx, locationID) if err != nil { return nil, goastorage.MakeNotAvailable(errors.New("cannot perform operation")) } diff --git a/internal/storage/service_test.go b/internal/storage/service_test.go index 034121f25..8b97d7fc9 100644 --- a/internal/storage/service_test.go +++ b/internal/storage/service_test.go @@ -231,7 +231,7 @@ func TestServiceSubmit(t *testing.T) { attrs.persistenceMock. EXPECT(). - CreatePackage( + CreateAIP( gomock.AssignableToTypeOf(ctx), gomock.Any(), ). @@ -280,7 +280,7 @@ func TestServiceSubmit(t *testing.T) { attrs.persistenceMock. EXPECT(). - CreatePackage( + CreateAIP( gomock.AssignableToTypeOf(ctx), gomock.Any(), ). @@ -341,7 +341,7 @@ func TestServiceSubmit(t *testing.T) { attrs.persistenceMock. EXPECT(). - CreatePackage( + CreateAIP( gomock.AssignableToTypeOf(ctx), gomock.Any(), ). @@ -375,7 +375,7 @@ func TestServiceCreate(t *testing.T) { attrs.persistenceMock. EXPECT(). - CreatePackage( + CreateAIP( mockutil.Context(), &goastorage.Package{ Name: name, @@ -594,10 +594,10 @@ func TestReject(t *testing.T) { attrs.persistenceMock. EXPECT(). - UpdatePackageStatus( + UpdateAIPStatus( ctx, aipID, - types.StatusRejected, + types.AIPStatusRejected, ). Return(nil). Times(1) @@ -616,7 +616,7 @@ func TestServiceReadPackage(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). @@ -643,15 +643,15 @@ func TestServiceUpdatePackageStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - UpdatePackageStatus( + UpdateAIPStatus( ctx, aipID, - types.StatusStored, + types.AIPStatusStored, ). Return(errors.New("something is wrong")). Times(1) - err := svc.UpdatePackageStatus(ctx, aipID, types.StatusStored) + err := svc.UpdatePackageStatus(ctx, aipID, types.AIPStatusStored) assert.Error(t, err, "something is wrong") }) } @@ -668,7 +668,7 @@ func TestServiceUpdatePackageLocationID(t *testing.T) { attrs.persistenceMock. EXPECT(). - UpdatePackageLocationID( + UpdateAIPLocationID( ctx, aipID, locationID, @@ -696,7 +696,7 @@ func TestServiceDelete(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). @@ -728,7 +728,7 @@ func TestServiceDelete(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). @@ -774,7 +774,7 @@ func TestServiceDelete(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). @@ -821,7 +821,7 @@ func TestServiceDelete(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). @@ -860,7 +860,7 @@ func TestServiceDelete(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). 
@@ -1051,10 +1051,10 @@ func TestServiceUpdate(t *testing.T) { attrs.persistenceMock. EXPECT(). - UpdatePackageStatus( + UpdateAIPStatus( gomock.AssignableToTypeOf(ctx), gomock.Any(), - types.StatusInReview, + types.AIPStatusInReview, ). Return( errors.New("unexpected error"), @@ -1091,10 +1091,10 @@ func TestServiceUpdate(t *testing.T) { attrs.persistenceMock. EXPECT(). - UpdatePackageStatus( + UpdateAIPStatus( gomock.AssignableToTypeOf(ctx), gomock.Any(), - types.StatusInReview, + types.AIPStatusInReview, ). Return( nil, @@ -1134,7 +1134,7 @@ func TestServiceMove(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1178,7 +1178,7 @@ func TestServiceMove(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1223,7 +1223,7 @@ func TestServiceMove(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1268,7 +1268,7 @@ func TestServiceMoveStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1308,7 +1308,7 @@ func TestServiceMoveStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1353,7 +1353,7 @@ func TestServiceMoveStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1397,7 +1397,7 @@ func TestServiceMoveStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1440,7 +1440,7 @@ func TestServiceMoveStatus(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( gomock.AssignableToTypeOf(ctx), aipID, ). @@ -1692,7 +1692,7 @@ func TestServiceLocationPackages(t *testing.T) { attrs.persistenceMock. EXPECT(). - LocationPackages( + LocationAIPs( ctx, locationID, ). @@ -1718,7 +1718,7 @@ func TestServiceLocationPackages(t *testing.T) { attrs.persistenceMock. EXPECT(). - LocationPackages( + LocationAIPs( ctx, locationID, ). @@ -1780,7 +1780,7 @@ func TestServiceShow(t *testing.T) { attrs.persistenceMock. EXPECT(). - ReadPackage( + ReadAIP( ctx, aipID, ). 
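// Illustrative sketch, not part of the diff: taken together, the service and test
// hunks above rename the storage persistence surface from Package* to AIP*. The
// approximate shape of that interface after this change is summarized below.
// This is a shape summary only, not meant to compile on its own: goastorage and
// types are the same packages already imported by service.go above, and the
// return types are partly inferred from the call sites, so treat them as an
// assumption rather than the canonical definition.

import (
	"context"

	"github.com/google/uuid"
)

type aipPersistence interface { // hypothetical name, for illustration only
	CreateAIP(ctx context.Context, aip *goastorage.Package) (*goastorage.Package, error)
	ReadAIP(ctx context.Context, aipID uuid.UUID) (*goastorage.Package, error)
	UpdateAIPStatus(ctx context.Context, aipID uuid.UUID, status types.AIPStatus) error
	UpdateAIPLocationID(ctx context.Context, aipID, locationID uuid.UUID) error
	LocationAIPs(ctx context.Context, locationID uuid.UUID) (goastorage.PackageCollection, error)
}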
diff --git a/internal/storage/types/aip_status.go b/internal/storage/types/aip_status.go new file mode 100644 index 000000000..231729d54 --- /dev/null +++ b/internal/storage/types/aip_status.go @@ -0,0 +1,97 @@ +package types + +import ( + "database/sql/driver" + "encoding/json" + "strings" +) + +type AIPStatus uint + +const ( + AIPStatusUnspecified AIPStatus = iota + AIPStatusInReview + AIPStatusRejected + AIPStatusStored + AIPStatusMoving +) + +func NewAIPStatus(status string) AIPStatus { + var s AIPStatus + + switch strings.ToLower(status) { + case "stored": + s = AIPStatusStored + case "rejected": + s = AIPStatusRejected + case "in_review": + s = AIPStatusInReview + case "moving": + s = AIPStatusMoving + default: + s = AIPStatusUnspecified + } + + return s +} + +func (a AIPStatus) String() string { + switch a { + case AIPStatusStored: + return "stored" + case AIPStatusRejected: + return "rejected" + case AIPStatusInReview: + return "in_review" + case AIPStatusMoving: + return "moving" + } + return "unspecified" +} + +func (a AIPStatus) Values() []string { + return []string{ + AIPStatusUnspecified.String(), + AIPStatusInReview.String(), + AIPStatusRejected.String(), + AIPStatusStored.String(), + AIPStatusMoving.String(), + } +} + +// Value provides the DB a string from int. +func (a AIPStatus) Value() (driver.Value, error) { + return a.String(), nil +} + +// Scan tells our code how to read the enum into our type. +func (a *AIPStatus) Scan(val interface{}) error { + var s string + switch v := val.(type) { + case nil: + return nil + case string: + s = v + case []uint8: + s = string(v) + } + + *a = NewAIPStatus(s) + + return nil +} + +func (a AIPStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(a.String()) +} + +func (a *AIPStatus) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + *a = NewAIPStatus(s) + + return nil +} diff --git a/internal/storage/types/status_test.go b/internal/storage/types/aip_status_test.go similarity index 64% rename from internal/storage/types/status_test.go rename to internal/storage/types/aip_status_test.go index 3d0f1945f..dadabacbb 100644 --- a/internal/storage/types/status_test.go +++ b/internal/storage/types/aip_status_test.go @@ -9,61 +9,61 @@ import ( "github.com/artefactual-sdps/enduro/internal/storage/types" ) -func TestPackageStatus(t *testing.T) { +func TestAIPStatus(t *testing.T) { t.Parallel() type test struct { code string - status types.PackageStatus + status types.AIPStatus } for _, tt := range []test{ { code: "unspecified", - status: types.StatusUnspecified, + status: types.AIPStatusUnspecified, }, { code: "in_review", - status: types.StatusInReview, + status: types.AIPStatusInReview, }, { code: "rejected", - status: types.StatusRejected, + status: types.AIPStatusRejected, }, { code: "stored", - status: types.StatusStored, + status: types.AIPStatusStored, }, { code: "moving", - status: types.StatusMoving, + status: types.AIPStatusMoving, }, } { t.Run(tt.code, func(t *testing.T) { t.Parallel() - assert.Equal(t, types.NewPackageStatus(tt.code), tt.status) + assert.Equal(t, types.NewAIPStatus(tt.code), tt.status) assert.Equal(t, tt.status.String(), tt.code) blob, err := json.Marshal(tt.status) assert.NilError(t, err) assert.DeepEqual(t, `"`+tt.code+`"`, string(blob)) - var st types.PackageStatus + var st types.AIPStatus err = json.Unmarshal([]byte(`"`+tt.code+`"`), &st) assert.NilError(t, err) assert.Equal(t, st, tt.status) - var ss types.PackageStatus + var ss 
types.AIPStatus err = ss.Scan(tt.code) assert.NilError(t, err) assert.Equal(t, ss, tt.status) assert.DeepEqual(t, ss.Values(), []string{ - types.StatusUnspecified.String(), - types.StatusInReview.String(), - types.StatusRejected.String(), - types.StatusStored.String(), - types.StatusMoving.String(), + types.AIPStatusUnspecified.String(), + types.AIPStatusInReview.String(), + types.AIPStatusRejected.String(), + types.AIPStatusStored.String(), + types.AIPStatusMoving.String(), }) v, err := ss.Value() diff --git a/internal/storage/types/status.go b/internal/storage/types/status.go deleted file mode 100644 index c70af6602..000000000 --- a/internal/storage/types/status.go +++ /dev/null @@ -1,97 +0,0 @@ -package types - -import ( - "database/sql/driver" - "encoding/json" - "strings" -) - -type PackageStatus uint - -const ( - StatusUnspecified PackageStatus = iota - StatusInReview - StatusRejected - StatusStored - StatusMoving -) - -func NewPackageStatus(status string) PackageStatus { - var s PackageStatus - - switch strings.ToLower(status) { - case "stored": - s = StatusStored - case "rejected": - s = StatusRejected - case "in_review": - s = StatusInReview - case "moving": - s = StatusMoving - default: - s = StatusUnspecified - } - - return s -} - -func (p PackageStatus) String() string { - switch p { - case StatusStored: - return "stored" - case StatusRejected: - return "rejected" - case StatusInReview: - return "in_review" - case StatusMoving: - return "moving" - } - return "unspecified" -} - -func (p PackageStatus) Values() []string { - return []string{ - StatusUnspecified.String(), - StatusInReview.String(), - StatusRejected.String(), - StatusStored.String(), - StatusMoving.String(), - } -} - -// Value provides the DB a string from int. -func (p PackageStatus) Value() (driver.Value, error) { - return p.String(), nil -} - -// Scan tells our code how to read the enum into our type. -func (p *PackageStatus) Scan(val interface{}) error { - var s string - switch v := val.(type) { - case nil: - return nil - case string: - s = v - case []uint8: - s = string(v) - } - - *p = NewPackageStatus(s) - - return nil -} - -func (p PackageStatus) MarshalJSON() ([]byte, error) { - return json.Marshal(p.String()) -} - -func (p *PackageStatus) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - - *p = NewPackageStatus(s) - - return nil -} diff --git a/internal/storage/workflows/move.go b/internal/storage/workflows/move.go index 01600c0e2..a3644c47a 100644 --- a/internal/storage/workflows/move.go +++ b/internal/storage/workflows/move.go @@ -24,7 +24,7 @@ func NewStorageMoveWorkflow(storagesvc storage.Service) *StorageMoveWorkflow { func (w *StorageMoveWorkflow) Execute(ctx temporalsdk_workflow.Context, req storage.StorageMoveWorkflowRequest) error { // Set package status to moving. { - if err := w.updatePackageStatus(ctx, types.StatusMoving, req.AIPID); err != nil { + if err := w.updatePackageStatus(ctx, types.AIPStatusMoving, req.AIPID); err != nil { return err } } @@ -97,7 +97,7 @@ func (w *StorageMoveWorkflow) Execute(ctx temporalsdk_workflow.Context, req stor // Set package status to stored. 
{ - if err := w.updatePackageStatus(ctx, types.StatusStored, req.AIPID); err != nil { + if err := w.updatePackageStatus(ctx, types.AIPStatusStored, req.AIPID); err != nil { return err } } @@ -107,7 +107,7 @@ func (w *StorageMoveWorkflow) Execute(ctx temporalsdk_workflow.Context, req stor func (w *StorageMoveWorkflow) updatePackageStatus( ctx temporalsdk_workflow.Context, - st types.PackageStatus, + st types.AIPStatus, aipID uuid.UUID, ) error { activityOpts := temporalsdk_workflow.WithLocalActivityOptions(ctx, temporalsdk_workflow.LocalActivityOptions{ diff --git a/internal/storage/workflows/move_test.go b/internal/storage/workflows/move_test.go index 607818c47..1f353edb2 100644 --- a/internal/storage/workflows/move_test.go +++ b/internal/storage/workflows/move_test.go @@ -28,8 +28,8 @@ func TestStorageMoveWorkflow(t *testing.T) { storagesvc := fake.NewMockService(ctrl) storagesvc.EXPECT().Delete(gomock.Any(), aipID) storagesvc.EXPECT().UpdatePackageLocationID(gomock.Any(), aipID, locationID) - storagesvc.EXPECT().UpdatePackageStatus(gomock.Any(), aipID, types.StatusMoving) - storagesvc.EXPECT().UpdatePackageStatus(gomock.Any(), aipID, types.StatusStored) + storagesvc.EXPECT().UpdatePackageStatus(gomock.Any(), aipID, types.AIPStatusMoving) + storagesvc.EXPECT().UpdatePackageStatus(gomock.Any(), aipID, types.AIPStatusStored) // Worker activities env.RegisterActivityWithOptions( diff --git a/internal/workflow/activities/classify_package.go b/internal/workflow/activities/classify_package.go index 4c5635fa9..0be7331b8 100644 --- a/internal/workflow/activities/classify_package.go +++ b/internal/workflow/activities/classify_package.go @@ -20,7 +20,7 @@ type ( } ClassifyPackageActivityResult struct { // Type of the package. - Type enums.PackageType + Type enums.SIPType } ) @@ -38,9 +38,9 @@ func (a *ClassifyPackageActivity) Execute( "Path", params.Path, ) - r := ClassifyPackageActivityResult{Type: enums.PackageTypeUnknown} + r := ClassifyPackageActivityResult{Type: enums.SIPTypeUnknown} if bagit.Is(params.Path) { - r.Type = enums.PackageTypeBagIt + r.Type = enums.SIPTypeBagIt } return &r, nil diff --git a/internal/workflow/activities/classify_package_test.go b/internal/workflow/activities/classify_package_test.go index 44d1afff7..7cc9ba8a3 100644 --- a/internal/workflow/activities/classify_package_test.go +++ b/internal/workflow/activities/classify_package_test.go @@ -35,12 +35,12 @@ func TestClassifyPackageActivity(t *testing.T) { params: activities.ClassifyPackageActivityParams{ Path: fs.NewDir(t, "enduro-test").Path(), }, - want: activities.ClassifyPackageActivityResult{Type: enums.PackageTypeUnknown}, + want: activities.ClassifyPackageActivityResult{Type: enums.SIPTypeUnknown}, }, { name: "Returns a bagit package type", params: activities.ClassifyPackageActivityParams{Path: testBag(t)}, - want: activities.ClassifyPackageActivityResult{Type: enums.PackageTypeBagIt}, + want: activities.ClassifyPackageActivityResult{Type: enums.SIPTypeBagIt}, }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/workflow/local_activities.go b/internal/workflow/local_activities.go index 546c9eea5..2ef9c2b0c 100644 --- a/internal/workflow/local_activities.go +++ b/internal/workflow/local_activities.go @@ -17,7 +17,7 @@ import ( type createPackageLocalActivityParams struct { Key string - Status enums.PackageStatus + Status enums.SIPStatus } func createPackageLocalActivity( @@ -27,7 +27,7 @@ func createPackageLocalActivity( ) (int, error) { info := temporalsdk_activity.GetInfo(ctx) - col := 
&datatypes.Package{ + col := &datatypes.SIP{ Name: params.Key, WorkflowID: info.WorkflowExecution.ID, RunID: info.WorkflowExecution.RunID, @@ -46,7 +46,7 @@ type updatePackageLocalActivityParams struct { Key string SIPID string StoredAt time.Time - Status enums.PackageStatus + Status enums.SIPStatus } type updatePackageLocalActivityResult struct{} @@ -92,7 +92,7 @@ func setStatusLocalActivity( ctx context.Context, pkgsvc package_.Service, pkgID int, - status enums.PackageStatus, + status enums.SIPStatus, ) (*setStatusLocalActivityResult, error) { return &setStatusLocalActivityResult{}, pkgsvc.SetStatus(ctx, pkgID, status) } @@ -175,7 +175,7 @@ func createPreservationActionLocalActivity( WorkflowID: params.WorkflowID, Type: params.Type, Status: params.Status, - PackageID: params.PackageID, + SIPID: params.PackageID, } if !params.StartedAt.IsZero() { pa.StartedAt = sql.NullTime{Time: params.StartedAt, Valid: true} diff --git a/internal/workflow/local_activities_test.go b/internal/workflow/local_activities_test.go index 6de856ba8..53bd0c72a 100644 --- a/internal/workflow/local_activities_test.go +++ b/internal/workflow/local_activities_test.go @@ -48,7 +48,7 @@ func TestCreatePreservationActionLocalActivity(t *testing.T) { Status: enums.PreservationActionStatusDone, StartedAt: sql.NullTime{Time: startedAt, Valid: true}, CompletedAt: sql.NullTime{Time: completedAt, Valid: true}, - PackageID: 1, + SIPID: 1, }).DoAndReturn(func(ctx context.Context, pa *datatypes.PreservationAction) error { pa.ID = 1 return nil @@ -69,7 +69,7 @@ func TestCreatePreservationActionLocalActivity(t *testing.T) { WorkflowID: "workflow-id", Type: enums.PreservationActionTypeCreateAip, Status: enums.PreservationActionStatusDone, - PackageID: 1, + SIPID: 1, }).DoAndReturn(func(ctx context.Context, pa *datatypes.PreservationAction) error { pa.ID = 1 return nil @@ -90,7 +90,7 @@ func TestCreatePreservationActionLocalActivity(t *testing.T) { WorkflowID: "workflow-id", Type: enums.PreservationActionTypeCreateAip, Status: enums.PreservationActionStatusDone, - PackageID: 1, + SIPID: 1, }).Return(fmt.Errorf("persistence error")) }, wantErr: "persistence error", diff --git a/internal/workflow/move.go b/internal/workflow/move.go index 2dddb9c6f..0dc8a918b 100644 --- a/internal/workflow/move.go +++ b/internal/workflow/move.go @@ -28,7 +28,7 @@ func (w *MoveWorkflow) Execute(ctx temporalsdk_workflow.Context, req *package_.M // Set package to in progress status. { ctx := withLocalActivityOpts(ctx) - err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, req.ID, enums.PackageStatusInProgress). + err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, req.ID, enums.SIPStatusInProgress). Get(ctx, nil) if err != nil { return err @@ -67,7 +67,7 @@ func (w *MoveWorkflow) Execute(ctx temporalsdk_workflow.Context, req *package_.M // Set package to done status. { ctx := withLocalActivityOpts(ctx) - err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, req.ID, enums.PackageStatusDone). + err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, req.ID, enums.SIPStatusDone). 
Get(ctx, nil) if err != nil { return err diff --git a/internal/workflow/move_test.go b/internal/workflow/move_test.go index 9c3ae9318..462898da8 100644 --- a/internal/workflow/move_test.go +++ b/internal/workflow/move_test.go @@ -62,7 +62,7 @@ func (s *MoveWorkflowTestSuite) TestSuccessfulMove() { locationID := uuid.MustParse("51328c02-2b63-47be-958e-e8088aa1a61f") // Package is set to in progress status. - s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.PackageStatusInProgress). + s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.SIPStatusInProgress). Return(nil, nil) // Move operation succeeds. @@ -85,7 +85,7 @@ func (s *MoveWorkflowTestSuite) TestSuccessfulMove() { ).Return(nil, nil) // Package is set back to done status. - s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.PackageStatusDone). + s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.SIPStatusDone). Return(nil, nil) // Package location is set. @@ -119,7 +119,7 @@ func (s *MoveWorkflowTestSuite) TestFailedMove() { locationID := uuid.MustParse("51328c02-2b63-47be-958e-e8088aa1a61f") // Package is set to in progress status. - s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.PackageStatusInProgress). + s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.SIPStatusInProgress). Return(nil, nil) // Move operation fails. @@ -133,7 +133,7 @@ func (s *MoveWorkflowTestSuite) TestFailedMove() { ).Return(nil, errors.New("error moving package")) // Package is set back to done status. - s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.PackageStatusDone). + s.env.OnActivity(setStatusLocalActivity, mock.Anything, mock.Anything, pkgID, enums.SIPStatusDone). Return(nil, nil) // Preservation action is created with failed status. diff --git a/internal/workflow/processing.go b/internal/workflow/processing.go index fb16e68ec..30abefa55 100644 --- a/internal/workflow/processing.go +++ b/internal/workflow/processing.go @@ -78,7 +78,7 @@ type TransferInfo struct { IsDir bool // PackageType is the type of the package. - PackageType enums.PackageType + PackageType enums.SIPType // TempPath is the temporary location of a working copy of the transfer. TempPath string @@ -188,7 +188,7 @@ func (w *ProcessingWorkflow) Execute(ctx temporalsdk_workflow.Context, req *pack } // Package status. All packages start in queued status. - status = enums.PackageStatusQueued + status = enums.SIPStatusQueued // Create AIP preservation action status. paStatus = enums.PreservationActionStatusUnspecified @@ -227,8 +227,8 @@ func (w *ProcessingWorkflow) Execute(ctx temporalsdk_workflow.Context, req *pack // workflow function returns. defer func() { // Mark as failed unless it completed successfully or it was abandoned. - if status != enums.PackageStatusDone && status != enums.PackageStatusAbandoned { - status = enums.PackageStatusError + if status != enums.SIPStatusDone && status != enums.SIPStatusAbandoned { + status = enums.SIPStatusError } // Use disconnected context so it also runs after cancellation. @@ -304,14 +304,14 @@ func (w *ProcessingWorkflow) Execute(ctx temporalsdk_workflow.Context, req *pack return sessErr } - status = enums.PackageStatusDone + status = enums.SIPStatusDone paStatus = enums.PreservationActionStatusDone } // Schedule deletion of the original in the watched data source. 
{ - if status == enums.PackageStatusDone { + if status == enums.SIPStatusDone { if tinfo.req.RetentionPeriod != nil { err := temporalsdk_workflow.NewTimer(ctx, *tinfo.req.RetentionPeriod).Get(ctx, nil) if err != nil { @@ -460,12 +460,12 @@ func (w *ProcessingWorkflow) SessionHandler( // Stop the workflow if preprocessing returned a SIP path that is not a // valid bag. - if tinfo.PackageType != enums.PackageTypeBagIt && w.cfg.Preprocessing.Enabled { + if tinfo.PackageType != enums.SIPTypeBagIt && w.cfg.Preprocessing.Enabled { return errors.New("preprocessing returned a path that is not a valid bag") } // If the SIP is a BagIt Bag, validate it. - if tinfo.IsDir && tinfo.PackageType == enums.PackageTypeBagIt { + if tinfo.IsDir && tinfo.PackageType == enums.SIPTypeBagIt { id, err := w.createPreservationTask( sessCtx, datatypes.PreservationTask{ @@ -542,7 +542,7 @@ func (w *ProcessingWorkflow) SessionHandler( Key: tinfo.req.Key, SIPID: tinfo.SIPID, StoredAt: tinfo.StoredAt, - Status: enums.PackageStatusInProgress, + Status: enums.SIPStatusInProgress, }). Get(activityOpts, nil) } @@ -622,7 +622,7 @@ func (w *ProcessingWorkflow) SessionHandler( // Set package to pending status. { ctx := withLocalActivityOpts(sessCtx) - err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, tinfo.req.PackageID, enums.PackageStatusPending).Get(ctx, nil) + err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, tinfo.req.PackageID, enums.SIPStatusPending).Get(ctx, nil) if err != nil { return err } @@ -660,7 +660,7 @@ func (w *ProcessingWorkflow) SessionHandler( // Set package to in progress status. { ctx := withLocalActivityOpts(sessCtx) - err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, tinfo.req.PackageID, enums.PackageStatusInProgress).Get(ctx, nil) + err := temporalsdk_workflow.ExecuteLocalActivity(ctx, setStatusLocalActivity, w.pkgsvc, tinfo.req.PackageID, enums.SIPStatusInProgress).Get(ctx, nil) if err != nil { return err } @@ -831,7 +831,7 @@ func (w *ProcessingWorkflow) transferA3m( } tinfo.Bundle = bundleResult - tinfo.PackageType = enums.PackageTypeArchivematicaStandardTransfer + tinfo.PackageType = enums.SIPTypeArchivematicaStandardTransfer // Delete bundled transfer when session ends. cleanup.registerPath(bundleResult.FullPath) @@ -885,7 +885,7 @@ func (w *ProcessingWorkflow) transferAM(ctx temporalsdk_workflow.Context, tinfo var err error // Bag PIP if it's not already a bag. 
- if tinfo.PackageType != enums.PackageTypeBagIt { + if tinfo.PackageType != enums.SIPTypeBagIt { lctx := withActivityOptsForLocalAction(ctx) var zipResult bagcreate.Result err = temporalsdk_workflow.ExecuteActivity( @@ -896,7 +896,7 @@ func (w *ProcessingWorkflow) transferAM(ctx temporalsdk_workflow.Context, tinfo if err != nil { return err } - tinfo.PackageType = enums.PackageTypeBagIt + tinfo.PackageType = enums.SIPTypeBagIt } err = w.validatePREMIS( diff --git a/internal/workflow/processing_test.go b/internal/workflow/processing_test.go index 1a7cd04e1..506766224 100644 --- a/internal/workflow/processing_test.go +++ b/internal/workflow/processing_test.go @@ -463,7 +463,7 @@ func (s *ProcessingWorkflowTestSuite) TestAutoApprovedAIP() { createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil).Once() s.env.OnActivity( setStatusInProgressLocalActivity, @@ -503,7 +503,7 @@ func (s *ProcessingWorkflowTestSuite) TestAutoApprovedAIP() { sessionCtx, activities.ClassifyPackageActivityParams{Path: extractPath}, ).Return( - &activities.ClassifyPackageActivityResult{Type: enums.PackageTypeBagIt}, nil, + &activities.ClassifyPackageActivityResult{Type: enums.SIPTypeBagIt}, nil, ) s.env.OnActivity( @@ -659,7 +659,7 @@ func (s *ProcessingWorkflowTestSuite) TestAMWorkflow() { // Activity mocks/assertions sequence s.env.OnActivity(createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil) s.env.OnActivity(setStatusInProgressLocalActivity, ctx, pkgsvc, pkgID, mock.AnythingOfType("time.Time")). @@ -686,7 +686,7 @@ func (s *ProcessingWorkflowTestSuite) TestAMWorkflow() { sessionCtx, activities.ClassifyPackageActivityParams{Path: extractPath}, ).Return( - &activities.ClassifyPackageActivityResult{Type: enums.PackageTypeUnknown}, nil, + &activities.ClassifyPackageActivityResult{Type: enums.SIPTypeUnknown}, nil, ) // Archivematica specific activities. 
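// Illustrative sketch, not part of the diff: every package status transition in the
// processing and move workflow hunks above goes through the same call shape —
// setStatusLocalActivity executed as a Temporal local activity, now taking an
// enums.SIPStatus value instead of enums.PackageStatus. A minimal version of that
// call, using an explicit local-activity timeout in place of the project's
// withLocalActivityOpts helper (the timeout below is illustrative only):

import (
	"time"

	temporalsdk_workflow "go.temporal.io/sdk/workflow"
)

func setStatusSketch(
	ctx temporalsdk_workflow.Context,
	pkgsvc package_.Service,
	pkgID int,
	status enums.SIPStatus, // e.g. enums.SIPStatusInProgress, enums.SIPStatusDone
) error {
	ctx = temporalsdk_workflow.WithLocalActivityOptions(ctx, temporalsdk_workflow.LocalActivityOptions{
		ScheduleToCloseTimeout: 5 * time.Second,
	})

	// setStatusLocalActivity persists the new status via pkgsvc.SetStatus.
	return temporalsdk_workflow.ExecuteLocalActivity(
		ctx, setStatusLocalActivity, pkgsvc, pkgID, status,
	).Get(ctx, nil)
}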
@@ -883,7 +883,7 @@ func (s *ProcessingWorkflowTestSuite) TestPackageRejection() { sessionCtx, activities.ClassifyPackageActivityParams{Path: extractPath}, ).Return( - &activities.ClassifyPackageActivityResult{Type: enums.PackageTypeUnknown}, nil, + &activities.ClassifyPackageActivityResult{Type: enums.SIPTypeUnknown}, nil, ) s.env.OnActivity(activities.BundleActivityName, sessionCtx, @@ -987,7 +987,7 @@ func (s *ProcessingWorkflowTestSuite) TestChildWorkflows() { createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil) s.env.OnActivity( @@ -1071,7 +1071,7 @@ func (s *ProcessingWorkflowTestSuite) TestChildWorkflows() { sessionCtx, activities.ClassifyPackageActivityParams{Path: prepDest}, ).Return( - &activities.ClassifyPackageActivityResult{Type: enums.PackageTypeBagIt}, nil, + &activities.ClassifyPackageActivityResult{Type: enums.SIPTypeBagIt}, nil, ) s.env.OnActivity( @@ -1257,7 +1257,7 @@ func (s *ProcessingWorkflowTestSuite) TestFailedSIP() { createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil) s.env.OnActivity( @@ -1372,7 +1372,7 @@ func (s *ProcessingWorkflowTestSuite) TestFailedPIPA3m() { createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil) s.env.OnActivity( @@ -1416,7 +1416,7 @@ func (s *ProcessingWorkflowTestSuite) TestFailedPIPA3m() { activities.ClassifyPackageActivityName, sessionCtx, activities.ClassifyPackageActivityParams{Path: extractPath}, - ).Return(&activities.ClassifyPackageActivityResult{Type: enums.PackageTypeBagIt}, nil) + ).Return(&activities.ClassifyPackageActivityResult{Type: enums.SIPTypeBagIt}, nil) s.env.OnActivity( createPreservationTaskLocalActivity, @@ -1535,7 +1535,7 @@ func (s *ProcessingWorkflowTestSuite) TestFailedPIPAM() { createPackageLocalActivity, ctx, pkgsvc, - &createPackageLocalActivityParams{Key: key, Status: enums.PackageStatusQueued}, + &createPackageLocalActivityParams{Key: key, Status: enums.SIPStatusQueued}, ).Return(pkgID, nil) s.env.OnActivity(setStatusInProgressLocalActivity, ctx, pkgsvc, pkgID, mock.AnythingOfType("time.Time")). @@ -1570,7 +1570,7 @@ func (s *ProcessingWorkflowTestSuite) TestFailedPIPAM() { activities.ClassifyPackageActivityName, sessionCtx, activities.ClassifyPackageActivityParams{Path: extractPath}, - ).Return(&activities.ClassifyPackageActivityResult{Type: enums.PackageTypeUnknown}, nil) + ).Return(&activities.ClassifyPackageActivityResult{Type: enums.SIPTypeUnknown}, nil) s.env.OnActivity(bagcreate.Name, sessionCtx, &bagcreate.Params{SourcePath: extractPath}). Return(&bagcreate.Result{BagPath: extractPath}, nil)
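// Illustrative sketch, not part of the diff: the new types.AIPStatus enum introduced
// earlier in this patch round-trips between its integer constants and the lower-case
// strings used in the database and in JSON. A small usage example, assuming it is
// run from inside the enduro module (the types package is internal):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/artefactual-sdps/enduro/internal/storage/types"
)

func main() {
	s := types.NewAIPStatus("in_review")
	fmt.Println(s == types.AIPStatusInReview) // true
	fmt.Println(s.String())                   // in_review

	// JSON uses the string form via MarshalJSON/UnmarshalJSON.
	blob, _ := json.Marshal(s)
	fmt.Println(string(blob)) // "in_review"

	var decoded types.AIPStatus
	_ = json.Unmarshal([]byte(`"stored"`), &decoded)
	fmt.Println(decoded == types.AIPStatusStored) // true

	// database/sql round-trips through Scan and Value with the same strings.
	var fromDB types.AIPStatus
	_ = fromDB.Scan("moving")
	v, _ := fromDB.Value()
	fmt.Println(v) // moving

	// Unknown inputs fall back to AIPStatusUnspecified.
	fmt.Println(types.NewAIPStatus("bogus") == types.AIPStatusUnspecified) // true
}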