diff --git a/.golangci.yml b/.golangci.yml index 178b70c3c..fbd0dd1ea 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,7 @@ linters: - misspell linters-settings: lll: - line-length: 120 + line-length: 140 tab-width: 4 mnd: ignored-functions: diff --git a/Makefile b/Makefile index 781500d5c..ad540304e 100644 --- a/Makefile +++ b/Makefile @@ -119,6 +119,12 @@ migrate: ## Run migration on development database @echo "Running PostgreSQL migration" @go run $(GO_BUILD_PARAMS) dev/migrate/main.go +generate-db: ## Generate repository/db with Jet + @echo "Generating internal/repository/postgres/db with jet" + @rm -rf internal/repository/postgres/db + @go run github.com/go-jet/jet/v2/cmd/jet -dsn=$$CARTESI_POSTGRES_ENDPOINT -schema=public -path=./internal/repository/postgres/db + @rm -rf internal/repository/postgres/db/rollupsdb/public/model + # ============================================================================= # Clean # ============================================================================= @@ -169,7 +175,7 @@ applications/echo-dapp: ## Create echo-dapp test application deploy-echo-dapp: applications/echo-dapp ## Deploy echo-dapp test application @echo "Deploying echo-dapp test application" - @./cartesi-rollups-cli app deploy -t applications/echo-dapp/ -v + @./cartesi-rollups-cli app deploy -n echo-dapp -t applications/echo-dapp/ -v # ============================================================================= # Static Analysis @@ -224,23 +230,6 @@ run-postgres: ## Run the PostgreSQL 16 docker container @echo "Starting portgres" @docker run --rm --name postgres -p 5432:5432 -d -e POSTGRES_PASSWORD=password -e POSTGRES_DB=rollupsdb -v $(CURDIR)/test/postgres/init-test-db.sh:/docker-entrypoint-initdb.d/init-test-db.sh postgres:16-alpine -run-postgraphile: ## Run the GraphQL server docker container - @docker run --rm --name postgraphile -p 10004:10004 -d --init \ - graphile/postgraphile:4.14.0 \ - --retry-on-init-fail \ - --dynamic-json \ - 
--no-setof-functions-contain-nulls \ - --no-ignore-rbac \ - --enable-query-batching \ - --enhance-graphiql \ - --extended-errors errcode \ - --legacy-relations omit \ - --connection "postgres://postgres:password@host.docker.internal:5432/rollupsdb?sslmode=disable" \ - --schema graphql \ - --host "0.0.0.0" \ - --port 10004 -# --append-plugins @graphile-contrib/pg-simplify-inflector \ - start: run-postgres run-devnet ## Start the anvil devnet and PostgreSQL 16 docker containers @$(MAKE) migrate diff --git a/api/openapi/inspect.yaml b/api/openapi/inspect.yaml index 174f6c068..7457c34d5 100644 --- a/api/openapi/inspect.yaml +++ b/api/openapi/inspect.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: Inspect-state HTTP API for Cartesi Rollups - version: 0.6.0 + version: 0.7.0 license: name: Apache-2.0 url: https://www.apache.org/licenses/LICENSE-2.0.html @@ -11,68 +11,27 @@ info: API that allows the DApp frontend to make inspect-state requests to the DApp backend. paths: - inspect/{dapp}/{payload}: - get: - operationId: inspect - summary: Inspect DApp state via GET - description: | - This method sends an inspect-state request to the DApp backend, passing the payload string in the URL. - The payload string should be URL-encoded; the inspect server will decode the string to UTF-8. - If the DApp frontend needs to pass a binary string to the backend then it is advised to use the POST method. - - The response contains a status string and the reports generated by the DApp backend. - The status string can be either 'accept', 'reject', or 'exception'. - In case of exception, the field exception_payload will contain the exception payload; - Otherwise, this field will be null. - - When running on machine mode, the whole Cartesi machine is rolled back after processing the inspect-state request. - On host mode, it is advised against changing the DApp backend state when processing an inspect-state request. 
- Notice that this method is synchronous, so it is not advised to be used for performing resource-intensive operations. - - parameters: - - in: path - name: dapp - required: true - schema: - type: string - - in: path - name: payload - required: true - schema: - type: string - - responses: - "200": - description: Inspect state response. - content: - application/json: - schema: - $ref: "#/components/schemas/InspectResult" - - default: - description: Error response. - content: - text/plain: - schema: - $ref: "#/components/schemas/Error" - inspect/{dapp}: post: operationId: inspect_post summary: Inspect DApp state via POST description: | - Differently from the GET method, the POST method sends an inspect-state request to the DApp backend by passing its payload in the request body. - The payload should be a binary. - Other than that, it behaves the same way as described in the GET method. + This POST method sends an inspect-state request to the DApp backend, using the body contents as a binary payload for the inspect method. + + The response includes a status string and reports generated by the DApp backend. If an exception occurs, the `exception_payload` field will contain the exception details; otherwise, this field will be null. + + The inspect operation is executed on a temporary fork of the machine created upon request arrival, which is discarded afterward. Note that this method is synchronous and not recommended for resource-intensive operations. 
parameters: - in: path name: dapp + description: dapp name or address required: true schema: type: string requestBody: + description: Binary payload content: application/octet-stream: schema: diff --git a/cmd/cartesi-rollups-cli/root/app/app.go b/cmd/cartesi-rollups-cli/root/app/app.go index 71b0f376c..573dae652 100644 --- a/cmd/cartesi-rollups-cli/root/app/app.go +++ b/cmd/cartesi-rollups-cli/root/app/app.go @@ -13,9 +13,9 @@ import ( ) var Cmd = &cobra.Command{ - Use: "app", - Short: "Application management related commands", - PersistentPreRun: common.Setup, + Use: "app", + Short: "Application management related commands", + PersistentPreRunE: common.PersistentPreRun, } func init() { diff --git a/cmd/cartesi-rollups-cli/root/app/deploy/deploy.go b/cmd/cartesi-rollups-cli/root/app/deploy/deploy.go index 9761659ff..be210e60f 100644 --- a/cmd/cartesi-rollups-cli/root/app/deploy/deploy.go +++ b/cmd/cartesi-rollups-cli/root/app/deploy/deploy.go @@ -17,6 +17,7 @@ import ( "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/pkg/contracts/iapplicationfactory" "github.com/cartesi/rollups-node/pkg/contracts/iauthorityfactory" + "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/cartesi/rollups-node/pkg/ethutil" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -34,20 +35,15 @@ var Cmd = &cobra.Command{ } const examples = `# Adds an application to Rollups Node: -cartesi-rollups-cli app deploy -a 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -i 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -t applications/echo-dapp` //nolint:lll - -const ( - statusRunning = "running" - statusNotRunning = "not-running" -) +cartesi-rollups-cli app deploy -n echo-dapp -a 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -c 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -t applications/echo-dapp` //nolint:lll var ( + name string applicationOwner string authorityOwner string templatePath string templateHash 
string - status string - iConsensusAddr string + consensusAddr string appFactoryAddr string authorityFactoryAddr string rpcURL string @@ -56,11 +52,21 @@ var ( salt string inputBoxBlockNumber uint64 epochLength uint64 + disabled bool printAsJSON bool noRegister bool ) func init() { + Cmd.Flags().StringVarP( + &name, + "name", + "n", + "", + "Application name", + ) + cobra.CheckErr(Cmd.MarkFlagRequired("name")) + Cmd.Flags().StringVarP( &applicationOwner, "app-owner", @@ -94,12 +100,12 @@ func init() { "Application template hash. If not provided, it will be read from the template URI", ) - Cmd.Flags().StringVarP( - &status, - "status", - "s", - statusRunning, - "Sets the application status", + Cmd.Flags().BoolVarP( + &disabled, + "disabled", + "d", + false, + "Sets the application state to disabled", ) Cmd.Flags().StringVarP( @@ -113,25 +119,32 @@ func init() { Cmd.Flags().StringVarP( &authorityFactoryAddr, "authority-factory", - "c", + "C", "0xB897F7Fe78f220aE34B7FA9493092701a873Ed45", "Authority Factory Address", ) Cmd.Flags().StringVarP( - &iConsensusAddr, - "iconsensus", - "i", + &consensusAddr, + "consensus", + "c", "", "Application IConsensus Address", ) + Cmd.Flags().Uint64VarP( + &epochLength, + "epoch-length", + "e", + 10, + "Consensus Epoch length. 
If consensus address is provided, the value will be read from the contract", + ) + Cmd.Flags().StringVarP(&rpcURL, "rpc-url", "r", "http://localhost:8545", "Ethereum RPC URL") Cmd.Flags().StringVarP(&privateKey, "private-key", "k", "", "Private key for signing transactions") Cmd.Flags().StringVarP(&mnemonic, "mnemonic", "m", ethutil.FoundryMnemonic, "Mnemonic for signing transactions") Cmd.Flags().StringVar(&salt, "salt", "0000000000000000000000000000000000000000000000000000000000000000", "salt") - Cmd.Flags().Uint64VarP(&inputBoxBlockNumber, "inputbox-block-number", "n", 0, "InputBox deployment block number") - Cmd.Flags().Uint64VarP(&epochLength, "epoch-length", "e", 10, "Consensus Epoch length") + Cmd.Flags().Uint64VarP(&inputBoxBlockNumber, "inputbox-block-number", "i", 0, "InputBox deployment block number") Cmd.Flags().BoolVarP(&printAsJSON, "print-json", "j", false, "Prints the application data as JSON") Cmd.Flags().BoolVar(&noRegister, "no-register", false, "Don't register the application on the node. 
Only deploy contracts") } @@ -139,19 +152,13 @@ func init() { func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommom.Database == nil { + if cmdcommom.Repository == nil { panic("Database was not initialized") } - var applicationStatus model.ApplicationStatus - switch status { - case statusRunning: - applicationStatus = model.ApplicationStatusRunning - case statusNotRunning: - applicationStatus = model.ApplicationStatusNotRunning - default: - fmt.Fprintf(os.Stderr, "Invalid application status: %s\n", status) - os.Exit(1) + applicationState := model.ApplicationState_Enabled + if disabled { + applicationState = model.ApplicationState_Disabled } if templateHash == "" { @@ -163,37 +170,46 @@ func run(cmd *cobra.Command, args []string) { } } - var consensusAddr common.Address + var consensus common.Address var err error - if iConsensusAddr == "" { + if consensusAddr == "" { authorityFactoryAddress := common.HexToAddress(authorityFactoryAddr) - consensusAddr, err = deployAuthority(ctx, authorityOwner, authorityFactoryAddress, epochLength, salt) + consensus, err = deployAuthority(ctx, authorityOwner, authorityFactoryAddress, epochLength, salt) if err != nil { fmt.Fprintf(os.Stderr, "Authoriy contract creation failed: %v\n", err) os.Exit(1) } } else { - consensusAddr = common.HexToAddress(iConsensusAddr) + consensus = common.HexToAddress(consensusAddr) + epochLength, err = getEpochLength(consensus) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get epoch length from consensus: %v\n", err) + os.Exit(1) + } } applicationFactoryAddress := common.HexToAddress(appFactoryAddr) - appAddr, err := deployApplication(ctx, applicationOwner, applicationFactoryAddress, consensusAddr, templateHash, salt) + appAddr, err := deployApplication(ctx, applicationOwner, applicationFactoryAddress, consensus, templateHash, salt) if err != nil { fmt.Fprintf(os.Stderr, "Application contract creation failed: %v\n", err) os.Exit(1) } application := model.Application{ - 
ContractAddress: appAddr, - TemplateUri: templatePath, - TemplateHash: common.HexToHash(templateHash), - LastProcessedBlock: inputBoxBlockNumber, - Status: applicationStatus, - IConsensusAddress: consensusAddr, + Name: name, + IApplicationAddress: appAddr, + IConsensusAddress: consensus, + TemplateURI: templatePath, + TemplateHash: common.HexToHash(templateHash), + EpochLength: epochLength, + State: applicationState, + LastProcessedBlock: inputBoxBlockNumber, + LastOutputCheckBlock: inputBoxBlockNumber, + LastClaimCheckBlock: inputBoxBlockNumber, } if !noRegister { - _, err = cmdcommom.Database.InsertApplication(ctx, &application) + _, err = cmdcommom.Repository.CreateApplication(ctx, &application) cobra.CheckErr(err) } @@ -205,7 +221,7 @@ func run(cmd *cobra.Command, args []string) { } fmt.Println(string(jsonData)) } else { - fmt.Printf("Application %v successfully deployed\n", application.ContractAddress) + fmt.Printf("Application %v successfully deployed\n", application.IApplicationAddress) } } @@ -413,6 +429,26 @@ func getAuth(ctx context.Context, client *ethclient.Client) (*bind.TransactOpts, return auth, nil } +func getEpochLength( + consensusAddr common.Address, +) (uint64, error) { + client, err := ethclient.Dial(rpcURL) + if err != nil { + return 0, fmt.Errorf("Failed to connect to the Ethereum client: %v", err) + } + + consensus, err := iconsensus.NewIConsensus(consensusAddr, client) + if err != nil { + return 0, fmt.Errorf("Failed to instantiate contract: %v", err) + } + + epochLengthRaw, err := consensus.GetEpochLength(nil) + if err != nil { + return 0, fmt.Errorf("error retrieving application epoch length: %v", err) + } + return epochLengthRaw.Uint64(), nil +} + func toBytes32(data []byte) [32]byte { var arr [32]byte if len(data) != 32 { diff --git a/cmd/cartesi-rollups-cli/root/app/list/list.go b/cmd/cartesi-rollups-cli/root/app/list/list.go index bcad5d636..2769eebd1 100644 --- a/cmd/cartesi-rollups-cli/root/app/list/list.go +++ 
b/cmd/cartesi-rollups-cli/root/app/list/list.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/repository" "github.com/spf13/cobra" ) @@ -24,11 +25,11 @@ cartesi-rollups-cli app list` func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if common.Database == nil { + if common.Repository == nil { panic("Database was not initialized") } - applications, err := common.Database.GetAllApplications(ctx) + applications, err := common.Repository.ListApplications(ctx, repository.ApplicationFilter{}, repository.Pagination{}) cobra.CheckErr(err) result, err := json.MarshalIndent(applications, "", " ") cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/app/register/register.go b/cmd/cartesi-rollups-cli/root/app/register/register.go index 701f72a57..d669c3151 100644 --- a/cmd/cartesi-rollups-cli/root/app/register/register.go +++ b/cmd/cartesi-rollups-cli/root/app/register/register.go @@ -11,7 +11,10 @@ import ( cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" "github.com/cartesi/rollups-node/internal/advancer/snapshot" "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/iapplication" + "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/spf13/cobra" ) @@ -23,24 +26,30 @@ var Cmd = &cobra.Command{ } const examples = `# Adds an application to Rollups Node: -cartesi-rollups-cli app register -a 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -i 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA` //nolint:lll - -const ( - statusRunning = "running" - statusNotRunning = "not-running" -) +cartesi-rollups-cli app register -n echo-dapp -a 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF` //nolint:lll var ( - applicationAddress string - templatePath string - templateHash string - 
inputBoxDeploymentBlockNumber uint64 - status string - iConsensusAddress string - printAsJSON bool + name string + applicationAddress string + consensusAddress string + templatePath string + templateHash string + epochLength uint64 + inputBoxBlockNumber uint64 + rpcURL string + disabled bool + printAsJSON bool ) func init() { + Cmd.Flags().StringVarP( + &name, + "name", + "n", + "", + "Application name", + ) + cobra.CheckErr(Cmd.MarkFlagRequired("name")) Cmd.Flags().StringVarP( &applicationAddress, @@ -52,13 +61,12 @@ func init() { cobra.CheckErr(Cmd.MarkFlagRequired("address")) Cmd.Flags().StringVarP( - &iConsensusAddress, - "iconsensus", - "i", + &consensusAddress, + "consensus", + "c", "", - "Application IConsensus Address", + "Application IConsensus Address. If not provided the value will be read from the contract", ) - cobra.CheckErr(Cmd.MarkFlagRequired("iconsensus")) Cmd.Flags().StringVarP( &templatePath, @@ -78,19 +86,27 @@ func init() { ) Cmd.Flags().Uint64VarP( - &inputBoxDeploymentBlockNumber, + &inputBoxBlockNumber, "inputbox-block-number", - "n", + "i", 0, "InputBox deployment block number", ) - Cmd.Flags().StringVarP( - &status, - "status", - "s", - statusRunning, - "Sets the application status", + Cmd.Flags().Uint64VarP( + &epochLength, + "epoch-length", + "e", + 10, + "Consensus Epoch length. 
If not provided the value will be read from the contract", + ) + + Cmd.Flags().BoolVarP( + &disabled, + "disabled", + "d", + false, + "Sets the application state to disabled", ) Cmd.Flags().BoolVarP( @@ -100,24 +116,20 @@ func init() { false, "Prints the application data as JSON", ) + + Cmd.Flags().StringVarP(&rpcURL, "rpc-url", "r", "http://localhost:8545", "Ethereum RPC URL") } func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { + if cmdcommon.Repository == nil { panic("Database was not initialized") } - var applicationStatus model.ApplicationStatus - switch status { - case statusRunning: - applicationStatus = model.ApplicationStatusRunning - case statusNotRunning: - applicationStatus = model.ApplicationStatusNotRunning - default: - fmt.Fprintf(os.Stderr, "Invalid application status: %s\n", status) - os.Exit(1) + applicationState := model.ApplicationState_Enabled + if disabled { + applicationState = model.ApplicationState_Disabled } if templateHash == "" { @@ -129,16 +141,42 @@ func run(cmd *cobra.Command, args []string) { } } + address := common.HexToAddress(applicationAddress) + var consensus common.Address + var err error + if cmd.Flags().Changed("consensus") { + consensus = common.HexToAddress(consensusAddress) + } else { + consensus, err = getConsensus(address) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get consensus address from application: %v\n", err) + os.Exit(1) + } + + } + + if !cmd.Flags().Changed("epoch-length") { + epochLength, err = getEpochLength(consensus) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get epoch length from consensus: %v\n", err) + os.Exit(1) + } + } + application := model.Application{ - ContractAddress: common.HexToAddress(applicationAddress), - TemplateUri: templatePath, - TemplateHash: common.HexToHash(templateHash), - LastProcessedBlock: inputBoxDeploymentBlockNumber, - Status: applicationStatus, - IConsensusAddress: common.HexToAddress(iConsensusAddress), +
Name: name, + IApplicationAddress: address, + IConsensusAddress: consensus, + TemplateURI: templatePath, + TemplateHash: common.HexToHash(templateHash), + EpochLength: epochLength, + State: applicationState, + LastProcessedBlock: inputBoxBlockNumber, + LastOutputCheckBlock: inputBoxBlockNumber, + LastClaimCheckBlock: inputBoxBlockNumber, } - _, err := cmdcommon.Database.InsertApplication(ctx, &application) + _, err = cmdcommon.Repository.CreateApplication(ctx, &application) cobra.CheckErr(err) if printAsJSON { @@ -149,6 +187,46 @@ func run(cmd *cobra.Command, args []string) { } fmt.Println(string(jsonData)) } else { - fmt.Printf("Application %v successfully registered\n", application.ContractAddress) + fmt.Printf("Application %v successfully registered\n", application.IApplicationAddress) + } +} + +func getConsensus( + appAddress common.Address, +) (common.Address, error) { + client, err := ethclient.Dial(rpcURL) + if err != nil { + return common.Address{}, fmt.Errorf("Failed to connect to the Ethereum client: %v", err) + } + + app, err := iapplication.NewIApplication(appAddress, client) + if err != nil { + return common.Address{}, fmt.Errorf("Failed to instantiate contract: %v", err) + } + + consensus, err := app.GetConsensus(nil) + if err != nil { + return common.Address{}, fmt.Errorf("error retrieving application consensus address: %v", err) + } + return consensus, nil +} + +func getEpochLength( + consensusAddr common.Address, +) (uint64, error) { + client, err := ethclient.Dial(rpcURL) + if err != nil { + return 0, fmt.Errorf("Failed to connect to the Ethereum client: %v", err) + } + + consensus, err := iconsensus.NewIConsensus(consensusAddr, client) + if err != nil { + return 0, fmt.Errorf("Failed to instantiate contract: %v", err) + } + + epochLengthRaw, err := consensus.GetEpochLength(nil) + if err != nil { + return 0, fmt.Errorf("error retrieving application epoch length: %v", err) } + return epochLengthRaw.Uint64(), nil } diff --git
a/cmd/cartesi-rollups-cli/root/app/status/status.go b/cmd/cartesi-rollups-cli/root/app/status/status.go index 78e484e03..5400c1a28 100644 --- a/cmd/cartesi-rollups-cli/root/app/status/status.go +++ b/cmd/cartesi-rollups-cli/root/app/status/status.go @@ -7,10 +7,10 @@ import ( "fmt" "os" + "github.com/spf13/cobra" + cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" "github.com/cartesi/rollups-node/internal/model" - "github.com/ethereum/go-ethereum/common" - "github.com/spf13/cobra" ) var Cmd = &cobra.Command{ @@ -21,22 +21,31 @@ var Cmd = &cobra.Command{ } const examples = `# Get application status: -cartesi-rollups-cli app status -a 0x000000000000000000000000000000000` +cartesi-rollups-cli app status -n echo-dapp` var ( + name string + address string enable bool disable bool ) func init() { Cmd.Flags().StringVarP( - &cmdcommon.ApplicationAddress, + &name, + "name", + "n", + "", + "Application name", + ) + + Cmd.Flags().StringVarP( + &address, "address", "a", "", "Application contract address", ) - cobra.CheckErr(Cmd.MarkFlagRequired("address")) Cmd.Flags().BoolVarP( &enable, @@ -54,36 +63,64 @@ func init() { "Disable the application", ) + Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + if cmd.Flags().Changed("enable") && cmd.Flags().Changed("disable") { + return fmt.Errorf("Cannot enable and disable at the same time") + } + return nil + } + } func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") + } + + var nameOrAddress string + if cmd.Flags().Changed("name") { + nameOrAddress = name + } else if cmd.Flags().Changed("address") { + nameOrAddress = 
address } - address := common.HexToAddress(cmdcommon.ApplicationAddress) - application, err := cmdcommon.Database.GetApplication(ctx, address) + app, err := cmdcommon.Repository.GetApplication(ctx, nameOrAddress) cobra.CheckErr(err) if (!cmd.Flags().Changed("enable")) && (!cmd.Flags().Changed("disable")) { - fmt.Println(application.Status) + fmt.Println(app.State) os.Exit(0) } - if cmd.Flags().Changed("enable") && cmd.Flags().Changed("disable") { - fmt.Fprintln(os.Stderr, "Cannot enable and disable at the same time") + if app.State == model.ApplicationState_Inoperable { + fmt.Fprintf(os.Stderr, "Error: Cannot execute operation. Application %s is on %s state\n", app.Name, app.State) os.Exit(1) } - status := model.ApplicationStatusRunning - if cmd.Flags().Changed("disable") { - status = model.ApplicationStatusNotRunning + dirty := false + if cmd.Flags().Changed("enable") && app.State == model.ApplicationState_Disabled { + app.State = model.ApplicationState_Enabled + dirty = true + } else if cmd.Flags().Changed("disable") && app.State == model.ApplicationState_Enabled { + app.State = model.ApplicationState_Disabled + dirty = true + } + + if !dirty { + fmt.Printf("Application %s status was already %s\n", app.Name, app.State) + os.Exit(0) } - err = cmdcommon.Database.UpdateApplicationStatus(ctx, address, status) + err = cmdcommon.Repository.UpdateApplicationState(ctx, app) cobra.CheckErr(err) - fmt.Printf("Application status updated to %s\n", status) + fmt.Printf("Application %s status updated to %s\n", app.Name, app.State) } diff --git a/cmd/cartesi-rollups-cli/root/common/common.go b/cmd/cartesi-rollups-cli/root/common/common.go index 92553cb68..7acb7e706 100644 --- a/cmd/cartesi-rollups-cli/root/common/common.go +++ b/cmd/cartesi-rollups-cli/root/common/common.go @@ -4,23 +4,26 @@ package common import ( - "log/slog" + "github.com/spf13/cobra" "github.com/cartesi/rollups-node/internal/repository" - "github.com/spf13/cobra" + 
"github.com/cartesi/rollups-node/internal/repository/factory" ) var ( - PostgresEndpoint string - ApplicationAddress string - Database *repository.Database + PostgresEndpoint string + Repository repository.Repository ) -func Setup(cmd *cobra.Command, args []string) { +func PersistentPreRun(cmd *cobra.Command, args []string) error { ctx := cmd.Context() var err error - Database, err = repository.Connect(ctx, PostgresEndpoint, slog.Default()) - cobra.CheckErr(err) + Repository, err = factory.NewRepositoryFromConnectionString(ctx, PostgresEndpoint) + if err != nil { + return err + } + + return nil } diff --git a/cmd/cartesi-rollups-cli/root/db/check/check.go b/cmd/cartesi-rollups-cli/root/db/check/check.go index f270812a4..d73ae9fc9 100644 --- a/cmd/cartesi-rollups-cli/root/db/check/check.go +++ b/cmd/cartesi-rollups-cli/root/db/check/check.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" - "github.com/cartesi/rollups-node/internal/repository/schema" + "github.com/cartesi/rollups-node/internal/repository/postgres/schema" "github.com/spf13/cobra" ) diff --git a/cmd/cartesi-rollups-cli/root/db/upgrade/upgrade.go b/cmd/cartesi-rollups-cli/root/db/upgrade/upgrade.go index 08585516b..cef7087e3 100644 --- a/cmd/cartesi-rollups-cli/root/db/upgrade/upgrade.go +++ b/cmd/cartesi-rollups-cli/root/db/upgrade/upgrade.go @@ -9,7 +9,7 @@ import ( "time" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" - "github.com/cartesi/rollups-node/internal/repository/schema" + "github.com/cartesi/rollups-node/internal/repository/postgres/schema" "github.com/spf13/cobra" ) diff --git a/cmd/cartesi-rollups-cli/root/execute/execute.go b/cmd/cartesi-rollups-cli/root/execute/execute.go index a2db11a32..c1426b765 100644 --- a/cmd/cartesi-rollups-cli/root/execute/execute.go +++ b/cmd/cartesi-rollups-cli/root/execute/execute.go @@ -7,12 +7,11 @@ import ( "fmt" "os" - cmdcommon 
"github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" - "github.com/cartesi/rollups-node/pkg/ethutil" - - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/spf13/cobra" + + cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/pkg/ethutil" ) var Cmd = &cobra.Command{ @@ -20,13 +19,14 @@ var Cmd = &cobra.Command{ Short: "Executes a voucher", Example: examples, Run: run, - PreRun: cmdcommon.Setup, } const examples = `# Executes voucher/output with index 5: -cartesi-rollups-cli execute --output-index 5 -a 0x000000000000000000000000000000000` +cartesi-rollups-cli execute -n echo-dapp --output-index 5` var ( + name string + address string outputIndex uint64 ethEndpoint string mnemonic string @@ -35,13 +35,20 @@ var ( func init() { Cmd.Flags().StringVarP( - &cmdcommon.ApplicationAddress, + &name, + "name", + "n", + "", + "Application name", + ) + + Cmd.Flags().StringVarP( + &address, "address", "a", "", "Application contract address", ) - cobra.CheckErr(Cmd.MarkFlagRequired("address")) Cmd.Flags().StringVarP( &cmdcommon.PostgresEndpoint, @@ -64,18 +71,32 @@ func init() { Cmd.Flags().Uint32Var(&account, "account", 0, "account index used to sign the transaction (default: 0)") + Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + return cmdcommon.PersistentPreRun(cmd, args) + } } func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { + if cmdcommon.Repository == nil { panic("Database was not initialized") } - application := common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + if cmd.Flags().Changed("name") { + nameOrAddress = name + } else if 
cmd.Flags().Changed("address") { + nameOrAddress = address + } - output, err := cmdcommon.Database.GetOutput(ctx, application, outputIndex) + output, err := cmdcommon.Repository.GetOutput(ctx, nameOrAddress, outputIndex) cobra.CheckErr(err) if output == nil { @@ -83,6 +104,9 @@ func run(cmd *cobra.Command, args []string) { os.Exit(1) } + app, err := cmdcommon.Repository.GetApplication(ctx, nameOrAddress) + cobra.CheckErr(err) + if len(output.OutputHashesSiblings) == 0 { fmt.Fprintf(os.Stderr, "The voucher/output with index %d has no associated proof yet\n", outputIndex) os.Exit(1) @@ -94,11 +118,11 @@ func run(cmd *cobra.Command, args []string) { signer, err := ethutil.NewMnemonicSigner(ctx, client, mnemonic, account) cobra.CheckErr(err) - fmt.Printf("Executing voucher app: %v output_index: %v\n", application, outputIndex) + fmt.Printf("Executing voucher app: %v (%v) output_index: %v with account: %v\n", app.Name, app.IApplicationAddress, outputIndex, signer.Account()) txHash, err := ethutil.ExecuteOutput( ctx, client, - application, + app.IApplicationAddress, signer, outputIndex, output.RawData, diff --git a/cmd/cartesi-rollups-cli/root/inspect/inspect.go b/cmd/cartesi-rollups-cli/root/inspect/inspect.go index b8daa63bb..e9212d973 100644 --- a/cmd/cartesi-rollups-cli/root/inspect/inspect.go +++ b/cmd/cartesi-rollups-cli/root/inspect/inspect.go @@ -8,10 +8,13 @@ import ( "encoding/json" "fmt" "io" - "net/url" + "os" + "strings" - "github.com/cartesi/rollups-node/pkg/inspectclient" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/spf13/cobra" + + "github.com/cartesi/rollups-node/pkg/inspectclient" ) var Cmd = &cobra.Command{ @@ -22,41 +25,105 @@ var Cmd = &cobra.Command{ } const examples = `# Makes a request with "hi": -cartesi-rollups-cli inspect -a 0x000000000000000000000000000000000 --payload "hi"` +cartesi-rollups-cli inspect -n echo-dapp --payload "hi" + +# Makes a request with "hi" encoded as hex: +cartesi-rollups-cli inspect -n echo-dapp 
--payload 0x6869 --hex + +# Reads payload from stdin: +echo -n "hi" | cartesi-rollups-cli inspect -n echo-dapp` var ( - applicationAddress string - payload string - inspectEndpoint string + name string + address string + cmdPayload string + isHex bool + inspectEndpoint string ) func init() { Cmd.Flags().StringVarP( - &applicationAddress, + &name, + "name", + "n", + "", + "Application name", + ) + + Cmd.Flags().StringVarP( + &address, "address", "a", "", "Application contract address", ) - cobra.CheckErr(Cmd.MarkFlagRequired("address")) - Cmd.Flags().StringVar(&payload, "payload", "", + Cmd.Flags().StringVar(&cmdPayload, "payload", "", "input payload") - cobra.CheckErr(Cmd.MarkFlagRequired("payload")) - Cmd.Flags().StringVar(&inspectEndpoint, "inspect-endpoint", "http://localhost:10000/", + Cmd.Flags().BoolVarP(&isHex, "hex", "x", false, + "Force interpretation of --payload as hex.") + + Cmd.Flags().StringVar(&inspectEndpoint, "inspect-endpoint", "http://localhost:10012/", "address used to connect to the inspect api") + + Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + return nil + } +} + +func resolvePayload(cmd *cobra.Command) ([]byte, error) { + if !cmd.Flags().Changed("payload") { + stdinBytes, err := io.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("failed to read from stdin: %w", err) + } + return stdinBytes, nil + } + + if isHex { + return decodeHex(cmdPayload) + } + + return []byte(cmdPayload), nil +} + +func decodeHex(s string) ([]byte, error) { + if !strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X") { + s = "0x" + s + } + + b, err := hexutil.Decode(s) + if err != nil { + return nil, fmt.Errorf("invalid hex payload %q: %w", s, err) + } + return b, nil } func run(cmd *cobra.Command, args []string) { ctx := 
cmd.Context() + + var nameOrAddress string + if cmd.Flags().Changed("name") { + nameOrAddress = name + } else if cmd.Flags().Changed("address") { + nameOrAddress = address + } + client, err := inspectclient.NewClient(inspectEndpoint) cobra.CheckErr(err) - encodedPayload := url.PathEscape(payload) - requestBody := bytes.NewReader([]byte(encodedPayload)) + payload, err := resolvePayload(cmd) + cobra.CheckErr(err) + requestBody := bytes.NewReader(payload) - response, err := client.InspectPostWithBody(ctx, applicationAddress, "application/octet-stream", requestBody) + response, err := client.InspectPostWithBody(ctx, nameOrAddress, "application/octet-stream", requestBody) cobra.CheckErr(err) defer response.Body.Close() diff --git a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go index f4114f23d..5c641d926 100644 --- a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go +++ b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go @@ -8,8 +8,8 @@ import ( "fmt" cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/repository" - "github.com/ethereum/go-ethereum/common" "github.com/spf13/cobra" ) @@ -21,35 +21,39 @@ var Cmd = &cobra.Command{ } const examples = `# Read all reports: -cartesi-rollups-cli read epochs -a 0x000000000000000000000000000000000` +cartesi-rollups-cli read epochs -n echo-dapp` var ( epochIndex uint64 ) func init() { - Cmd.Flags().Uint64Var(&epochIndex, "epoch-index", 0, - "index of the epoch") - + Cmd.Flags().Uint64Var(&epochIndex, "epoch-index", 0, "index of the epoch") } func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") } - application := common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + pFlags := cmd.Flags() + if pFlags.Changed("name") { 
+ nameOrAddress = pFlags.Lookup("name").Value.String() + } else if pFlags.Changed("address") { + nameOrAddress = pFlags.Lookup("address").Value.String() + } var result []byte if cmd.Flags().Changed("epoch-index") { - reports, err := cmdcommon.Database.GetEpoch(ctx, epochIndex, application) + reports, err := cmdcommon.Repository.GetEpoch(ctx, nameOrAddress, epochIndex) cobra.CheckErr(err) result, err = json.MarshalIndent(reports, "", " ") cobra.CheckErr(err) } else { - reports, err := cmdcommon.Database.GetEpochs(ctx, application) + reports, err := cmdcommon.Repository.ListEpochs(ctx, nameOrAddress, repository.EpochFilter{}, repository.Pagination{}) cobra.CheckErr(err) result, err = json.MarshalIndent(reports, "", " ") cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go b/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go index f18828f6f..e56cd4c0b 100644 --- a/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go +++ b/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go @@ -8,8 +8,8 @@ import ( "fmt" cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/repository" - "github.com/ethereum/go-ethereum/common" "github.com/spf13/cobra" ) @@ -21,7 +21,7 @@ var Cmd = &cobra.Command{ } const examples = `# Read inputs from GraphQL: -cartesi-rollups-cli read inputs -a 0x000000000000000000000000000000000` +cartesi-rollups-cli read inputs -n echo-dapp` var ( index uint64 @@ -35,20 +35,26 @@ func init() { func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") } - application := common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + pFlags := cmd.Flags() + if pFlags.Changed("name") { + nameOrAddress = pFlags.Lookup("name").Value.String() + } else if pFlags.Changed("address") { + nameOrAddress = 
pFlags.Lookup("address").Value.String() + } var result []byte if cmd.Flags().Changed("index") { - inputs, err := cmdcommon.Database.GetInput(ctx, application, index) + inputs, err := cmdcommon.Repository.GetInput(ctx, nameOrAddress, index) cobra.CheckErr(err) result, err = json.MarshalIndent(inputs, "", " ") cobra.CheckErr(err) } else { - inputs, err := cmdcommon.Database.GetInputs(ctx, application) + inputs, err := cmdcommon.Repository.ListInputs(ctx, nameOrAddress, repository.InputFilter{}, repository.Pagination{}) cobra.CheckErr(err) result, err = json.MarshalIndent(inputs, "", " ") cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go b/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go index ab503da16..133e42c4b 100644 --- a/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go +++ b/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go @@ -8,10 +8,10 @@ import ( "fmt" "os" - cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" - - "github.com/ethereum/go-ethereum/common" "github.com/spf13/cobra" + + cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/repository" ) var Cmd = &cobra.Command{ @@ -22,7 +22,7 @@ var Cmd = &cobra.Command{ } const examples = `# Read all notices: -cartesi-rollups-cli read outputs -a 0x000000000000000000000000000000000` +cartesi-rollups-cli read outputs -n echo-dapp` var ( outputIndex uint64 @@ -40,11 +40,17 @@ func init() { func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") } - application := common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + pFlags := cmd.Flags() + if pFlags.Changed("name") { + nameOrAddress = pFlags.Lookup("name").Value.String() + } else if pFlags.Changed("address") { + nameOrAddress = 
pFlags.Lookup("address").Value.String() + } var result []byte if cmd.Flags().Changed("output-index") { @@ -52,17 +58,21 @@ func run(cmd *cobra.Command, args []string) { fmt.Fprintf(os.Stderr, "Error: Only one of 'output-index' or 'input-index' can be used at a time.\n") os.Exit(1) } - outputs, err := cmdcommon.Database.GetOutput(ctx, application, outputIndex) + outputs, err := cmdcommon.Repository.GetOutput(ctx, nameOrAddress, outputIndex) cobra.CheckErr(err) result, err = json.MarshalIndent(outputs, "", " ") cobra.CheckErr(err) } else if cmd.Flags().Changed("input-index") { - outputs, err := cmdcommon.Database.GetOutputsByInputIndex(ctx, application, inputIndex) + f := repository.OutputFilter{InputIndex: &inputIndex} + p := repository.Pagination{} + outputs, err := cmdcommon.Repository.ListOutputs(ctx, nameOrAddress, f, p) cobra.CheckErr(err) result, err = json.MarshalIndent(outputs, "", " ") cobra.CheckErr(err) } else { - outputs, err := cmdcommon.Database.GetOutputs(ctx, application) + f := repository.OutputFilter{} + p := repository.Pagination{} + outputs, err := cmdcommon.Repository.ListOutputs(ctx, nameOrAddress, f, p) cobra.CheckErr(err) result, err = json.MarshalIndent(outputs, "", " ") cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/read/read.go b/cmd/cartesi-rollups-cli/root/read/read.go index c7e343df4..611fb64da 100644 --- a/cmd/cartesi-rollups-cli/root/read/read.go +++ b/cmd/cartesi-rollups-cli/root/read/read.go @@ -4,6 +4,8 @@ package read import ( + "fmt" + "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/epochs" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/inputs" @@ -14,20 +16,31 @@ import ( ) var Cmd = &cobra.Command{ - Use: "read", - Short: "Read the node state from the database", - PersistentPreRun: common.Setup, + Use: "read", + Short: "Read the node state from the database", } +var ( + name string + address string +) + 
func init() { Cmd.PersistentFlags().StringVarP( - &common.ApplicationAddress, + &name, + "name", + "n", + "", + "Application name", + ) + + Cmd.PersistentFlags().StringVarP( + &address, "address", "a", "", "Application contract address", ) - cobra.CheckErr(Cmd.MarkPersistentFlagRequired("address")) Cmd.PersistentFlags().StringVarP( &common.PostgresEndpoint, @@ -37,6 +50,16 @@ func init() { "Postgres endpoint", ) + Cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + return common.PersistentPreRun(cmd, args) + } + Cmd.AddCommand(epochs.Cmd) Cmd.AddCommand(inputs.Cmd) Cmd.AddCommand(outputs.Cmd) diff --git a/cmd/cartesi-rollups-cli/root/read/reports/reports.go b/cmd/cartesi-rollups-cli/root/read/reports/reports.go index 5b0116f9b..754fd6022 100644 --- a/cmd/cartesi-rollups-cli/root/read/reports/reports.go +++ b/cmd/cartesi-rollups-cli/root/read/reports/reports.go @@ -8,10 +8,10 @@ import ( "fmt" "os" - cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" - - "github.com/ethereum/go-ethereum/common" "github.com/spf13/cobra" + + cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/repository" ) var Cmd = &cobra.Command{ @@ -22,7 +22,7 @@ var Cmd = &cobra.Command{ } const examples = `# Read all reports: -cartesi-rollups-cli read reports -a 0x000000000000000000000000000000000` +cartesi-rollups-cli read reports -n echo-dapp` var ( inputIndex uint64 @@ -40,11 +40,17 @@ func init() { func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") } - application := 
common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + pFlags := cmd.Flags() + if pFlags.Changed("name") { + nameOrAddress = pFlags.Lookup("name").Value.String() + } else if pFlags.Changed("address") { + nameOrAddress = pFlags.Lookup("address").Value.String() + } var result []byte if cmd.Flags().Changed("report-index") { @@ -52,17 +58,21 @@ func run(cmd *cobra.Command, args []string) { fmt.Fprintf(os.Stderr, "Error: Only one of 'output-index' or 'input-index' can be used at a time.\n") os.Exit(1) } - reports, err := cmdcommon.Database.GetReport(ctx, application, reportIndex) + reports, err := cmdcommon.Repository.GetReport(ctx, nameOrAddress, reportIndex) cobra.CheckErr(err) result, err = json.MarshalIndent(reports, "", " ") cobra.CheckErr(err) } else if cmd.Flags().Changed("input-index") { - reports, err := cmdcommon.Database.GetReportsByInputIndex(ctx, application, inputIndex) + f := repository.ReportFilter{InputIndex: &inputIndex} + p := repository.Pagination{} + reports, err := cmdcommon.Repository.ListReports(ctx, nameOrAddress, f, p) cobra.CheckErr(err) result, err = json.MarshalIndent(reports, "", " ") cobra.CheckErr(err) } else { - reports, err := cmdcommon.Database.GetReports(ctx, application) + f := repository.ReportFilter{} + p := repository.Pagination{} + reports, err := cmdcommon.Repository.ListReports(ctx, nameOrAddress, f, p) cobra.CheckErr(err) result, err = json.MarshalIndent(reports, "", " ") cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/send/send.go b/cmd/cartesi-rollups-cli/root/send/send.go index 8a83d6a69..333148db8 100644 --- a/cmd/cartesi-rollups-cli/root/send/send.go +++ b/cmd/cartesi-rollups-cli/root/send/send.go @@ -5,9 +5,15 @@ package send import ( "fmt" + "io" + "os" + "strings" - "github.com/cartesi/rollups-node/pkg/addresses" + cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" + "github.com/cartesi/rollups-node/internal/model" + 
"github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/ethutil" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/ethclient" @@ -21,16 +27,24 @@ var Cmd = &cobra.Command{ Run: run, } -const examples = `# Send the string "hi" encoded as hex: -cartesi-rollups-cli send --address 0x00000000000000000000 --payload 0x$(printf "hi" | xxd -p)` +const examples = `# Send the string "hi": +cartesi-rollups-cli send -n echo-dapp --payload "hi" + +# Send the string "hi" encoded as hex: +cartesi-rollups-cli send -n echo-dapp --payload 0x6869 --hex + +# Read from stdin: +echo "hi" | cartesi-rollups-cli send -n echo-dapp` var ( - ethEndpoint string - mnemonic string - account uint32 - hexPayload string - addressBookFile string - applicationAddress string + name string + address string + ethEndpoint string + mnemonic string + account uint32 + inputBoxAddress string + cmdPayload string + isHex bool ) func init() { @@ -43,37 +57,102 @@ func init() { Cmd.Flags().Uint32Var(&account, "account", 0, "account index used to sign the transaction (default: 0)") - Cmd.Flags().StringVarP(&applicationAddress, "address", "a", "", "Application contract address") - cobra.CheckErr(Cmd.MarkFlagRequired("address")) + Cmd.Flags().StringVarP(&name, "name", "n", "", + "Application name") - Cmd.Flags().StringVar(&hexPayload, "payload", "", + Cmd.Flags().StringVarP(&address, "address", "a", "", "Application contract address") + + Cmd.Flags().StringVar(&cmdPayload, "payload", "", "input payload hex-encoded starting with 0x") - cobra.CheckErr(Cmd.MarkFlagRequired("payload")) - Cmd.Flags().StringVar(&addressBookFile, "address-book", "deployment.json", - "if set, load the address book from the given file; else from deployment.json") + Cmd.Flags().BoolVarP(&isHex, "hex", "x", false, + "Force interpretation of --payload as hex.") + + Cmd.Flags().StringVar(&inputBoxAddress, "inputbox-address", "", + "Input 
Box contract address") + + Cmd.Flags().StringVarP( + &cmdcommon.PostgresEndpoint, + "postgres-endpoint", + "p", + "postgres://postgres:password@localhost:5432/rollupsdb?sslmode=disable", + "Postgres endpoint", + ) + + Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + return cmdcommon.PersistentPreRun(cmd, args) + } +} + +func resolvePayload(cmd *cobra.Command) ([]byte, error) { + if !cmd.Flags().Changed("payload") { + stdinBytes, err := io.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("failed to read from stdin: %w", err) + } + return stdinBytes, nil + } + + if isHex { + return decodeHex(cmdPayload) + } + + return []byte(cmdPayload), nil +} + +func decodeHex(s string) ([]byte, error) { + if !strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X") { + s = "0x" + s + } + + b, err := hexutil.Decode(s) + if err != nil { + return nil, fmt.Errorf("invalid hex payload %q: %w", s, err) + } + return b, nil } func run(cmd *cobra.Command, args []string) { - payload, err := hexutil.Decode(hexPayload) + ctx := cmd.Context() + if cmdcommon.Repository == nil { + panic("Repository was not initialized") + } + + var nameOrAddress string + if cmd.Flags().Changed("name") { + nameOrAddress = name + } else if cmd.Flags().Changed("address") { + nameOrAddress = address + } + + app, err := cmdcommon.Repository.GetApplication(ctx, nameOrAddress) + cobra.CheckErr(err) + + payload, err := resolvePayload(cmd) cobra.CheckErr(err) - ctx := cmd.Context() client, err := ethclient.DialContext(ctx, ethEndpoint) cobra.CheckErr(err) signer, err := ethutil.NewMnemonicSigner(ctx, client, mnemonic, account) cobra.CheckErr(err) - var book *addresses.Book - if addressBookFile != "" { - book, err = addresses.GetBookFromFile(addressBookFile) + if 
!cmd.Flags().Changed("inputbox-address") { + nconfig, err := repository.LoadNodeConfig[model.NodeConfigValue](ctx, cmdcommon.Repository, model.BaseConfigKey) cobra.CheckErr(err) + inputBoxAddress = nconfig.Value.InputBoxAddress } - appAddr := common.HexToAddress(applicationAddress) + appAddr := app.IApplicationAddress + ibAddr := common.HexToAddress(inputBoxAddress) - inputIndex, blockNumber, err := ethutil.AddInput(ctx, client, book, appAddr, signer, payload) + inputIndex, blockNumber, err := ethutil.AddInput(ctx, client, ibAddr, appAddr, signer, payload) cobra.CheckErr(err) fmt.Printf("Input sent to app at %s. Index: %d BlockNumber: %d\n", appAddr, inputIndex, blockNumber) diff --git a/cmd/cartesi-rollups-cli/root/validate/validate.go b/cmd/cartesi-rollups-cli/root/validate/validate.go index f5087d2ba..75c4af1c9 100644 --- a/cmd/cartesi-rollups-cli/root/validate/validate.go +++ b/cmd/cartesi-rollups-cli/root/validate/validate.go @@ -10,7 +10,6 @@ import ( cmdcommon "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/common" "github.com/cartesi/rollups-node/pkg/ethutil" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/spf13/cobra" ) @@ -20,26 +19,34 @@ var Cmd = &cobra.Command{ Short: "Validates a notice", Example: examples, Run: run, - PreRun: cmdcommon.Setup, } const examples = `# Validates output with index 5: -cartesi-rollups-cli validate --output-index 5 -a 0x000000000000000000000000000000000` +cartesi-rollups-cli validate -n echo-dapp --output-index 5` var ( + name string + address string outputIndex uint64 ethEndpoint string ) func init() { Cmd.Flags().StringVarP( - &cmdcommon.ApplicationAddress, + &name, + "name", + "n", + "", + "Application name", + ) + + Cmd.Flags().StringVarP( + &address, "address", "a", "", "Application contract address", ) - cobra.CheckErr(Cmd.MarkFlagRequired("address")) Cmd.Flags().StringVarP( &cmdcommon.PostgresEndpoint, @@ -56,17 +63,32 @@ func init() { 
Cmd.Flags().StringVar(&ethEndpoint, "eth-endpoint", "http://localhost:8545", "ethereum node JSON-RPC endpoint") + Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if name == "" && address == "" { + return fmt.Errorf("either 'name' or 'address' must be specified") + } + if name != "" && address != "" { + return fmt.Errorf("only one of 'name' or 'address' can be specified") + } + return cmdcommon.PersistentPreRun(cmd, args) + } + } func run(cmd *cobra.Command, args []string) { ctx := cmd.Context() - if cmdcommon.Database == nil { - panic("Database was not initialized") + if cmdcommon.Repository == nil { + panic("Repository was not initialized") } - application := common.HexToAddress(cmdcommon.ApplicationAddress) + var nameOrAddress string + if cmd.Flags().Changed("name") { + nameOrAddress = name + } else if cmd.Flags().Changed("address") { + nameOrAddress = address + } - output, err := cmdcommon.Database.GetOutput(ctx, application, outputIndex) + output, err := cmdcommon.Repository.GetOutput(ctx, nameOrAddress, outputIndex) cobra.CheckErr(err) if output == nil { @@ -74,6 +96,9 @@ func run(cmd *cobra.Command, args []string) { os.Exit(1) } + app, err := cmdcommon.Repository.GetApplication(ctx, nameOrAddress) + cobra.CheckErr(err) + if len(output.OutputHashesSiblings) == 0 { fmt.Fprintf(os.Stderr, "The output with index %d has no associated proof yet\n", outputIndex) os.Exit(0) @@ -82,11 +107,11 @@ func run(cmd *cobra.Command, args []string) { client, err := ethclient.DialContext(ctx, ethEndpoint) cobra.CheckErr(err) - fmt.Printf("Validating output app: %v output_index: %v\n", application, outputIndex) + fmt.Printf("Validating output app: %v (%v) output_index: %v\n", app.Name, app.IApplicationAddress, outputIndex) err = ethutil.ValidateOutput( ctx, client, - application, + app.IApplicationAddress, outputIndex, output.RawData, output.OutputHashesSiblings, diff --git a/cmd/cartesi-rollups-evm-reader/root/root.go b/cmd/cartesi-rollups-evm-reader/root/root.go 
index 2d4a6f3bc..b268d02d8 100644 --- a/cmd/cartesi-rollups-evm-reader/root/root.go +++ b/cmd/cartesi-rollups-evm-reader/root/root.go @@ -4,13 +4,13 @@ package root import ( + "strings" "time" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/evmreader" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum/common" "github.com/spf13/cobra" ) @@ -28,13 +28,15 @@ var ( TelemetryAddress: ":10001", Impl: &readerService, }, - EvmReaderPersistentConfig: model.EvmReaderPersistentConfig{ - DefaultBlock: model.DefaultBlockStatusSafe, + NodeConfig: model.NodeConfig[model.NodeConfigValue]{ + Value: model.NodeConfigValue{ + DefaultBlock: model.DefaultBlock_Finalized, + }, }, MaxStartupTime: 10 * time.Second, } inputBoxAddress service.EthAddress - DefaultBlockString = "safe" + DefaultBlockString = "finalized" ) var Cmd = &cobra.Command{ @@ -77,7 +79,7 @@ func init() { Cmd.Flags().Var(&inputBoxAddress, "inputbox-address", "Input Box contract address") - Cmd.Flags().Uint64VarP(&createInfo.InputBoxDeploymentBlock, + Cmd.Flags().Uint64VarP(&createInfo.Value.InputBoxDeploymentBlock, "inputbox-block-number", "n", 0, "Input Box deployment block number") Cmd.Flags().DurationVar(&createInfo.MaxStartupTime, @@ -88,11 +90,11 @@ func init() { func run(cmd *cobra.Command, args []string) { if cmd.Flags().Changed("default-block") { var err error - createInfo.DefaultBlock, err = config.ToDefaultBlockFromString(DefaultBlockString) + createInfo.Value.DefaultBlock, err = config.ToDefaultBlockFromString(DefaultBlockString) cobra.CheckErr(err) } if cmd.Flags().Changed("inputbox-address") { - createInfo.InputBoxAddress = common.Address(inputBoxAddress) + createInfo.Value.InputBoxAddress = strings.ToLower(inputBoxAddress.String()) } cobra.CheckErr(evmreader.Create(&createInfo, &readerService)) diff --git a/cmd/cartesi-rollups-node/root/root.go b/cmd/cartesi-rollups-node/root/root.go 
index 519146809..25893d817 100644 --- a/cmd/cartesi-rollups-node/root/root.go +++ b/cmd/cartesi-rollups-node/root/root.go @@ -4,8 +4,11 @@ package root import ( + "strings" "time" + "github.com/cartesi/rollups-node/internal/config" + "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/node" "github.com/cartesi/rollups-node/pkg/service" "github.com/spf13/cobra" @@ -24,8 +27,15 @@ var ( TelemetryAddress: ":10000", Impl: &nodeService, }, + NodeConfig: model.NodeConfig[model.NodeConfigValue]{ + Value: model.NodeConfigValue{ + DefaultBlock: model.DefaultBlock_Finalized, + }, + }, MaxStartupTime: 10 * time.Second, } + DefaultBlockString = "finalized" + inputBoxAddress service.EthAddress ) var Cmd = &cobra.Command{ @@ -40,6 +50,16 @@ func init() { Cmd.Flags().StringVar(&createInfo.TelemetryAddress, "telemetry-address", createInfo.TelemetryAddress, "telemetry address") + Cmd.Flags().StringVarP(&DefaultBlockString, + "default-block", "d", DefaultBlockString, + `Default block to be used when fetching new blocks. 
+ One of 'latest', 'safe', 'pending', 'finalized'`) + Cmd.Flags().Var(&inputBoxAddress, + "inputbox-address", + "Input Box contract address") + Cmd.Flags().Uint64VarP(&createInfo.Value.InputBoxDeploymentBlock, + "inputbox-block-number", "n", 0, + "Input Box deployment block number") Cmd.Flags().Var(&createInfo.LogLevel, "log-level", "log level: debug, info, warn or error") @@ -55,6 +75,15 @@ func init() { } func run(cmd *cobra.Command, args []string) { + if cmd.Flags().Changed("default-block") { + var err error + createInfo.Value.DefaultBlock, err = config.ToDefaultBlockFromString(DefaultBlockString) + cobra.CheckErr(err) + } + if cmd.Flags().Changed("inputbox-address") { + createInfo.Value.InputBoxAddress = strings.ToLower(inputBoxAddress.String()) + } + cobra.CheckErr(node.Create(&createInfo, &nodeService)) nodeService.CreateDefaultHandlers("") cobra.CheckErr(nodeService.Serve()) diff --git a/compose.individual-services.yaml b/compose.individual-services.yaml index ea9e05867..a6c967206 100644 --- a/compose.individual-services.yaml +++ b/compose.individual-services.yaml @@ -105,31 +105,6 @@ services: environment: <<: *env - graphql_server: - image: graphile/postgraphile:4.14.0 - init: true - command: >- - --retry-on-init-fail - --dynamic-json - --no-setof-functions-contain-nulls - --no-ignore-rbac - --enable-query-batching - --enhance-graphiql - --extended-errors errcode - --legacy-relations omit - --connection postgres://postgres:password@database:5432/rollupsdb?sslmode=disable - --schema graphql - --host 0.0.0.0 - --port 10004 -# --append-plugins @graphile-contrib/pg-simplify-inflector - depends_on: - database: - condition: service_healthy - networks: - - devnet - ports: - - "10004:10004" # postgraphile - volumes: node_data: diff --git a/compose.yaml b/compose.yaml index a8e5e7b1b..9074a938e 100644 --- a/compose.yaml +++ b/compose.yaml @@ -44,33 +44,6 @@ services: environment: <<: *env - graphql_server: - image: graphile/postgraphile:4.14.0 - init: true - 
command: >- - --retry-on-init-fail - --dynamic-json - --no-setof-functions-contain-nulls - --no-ignore-rbac - --enable-query-batching - --enhance-graphiql - --extended-errors errcode - --legacy-relations omit - --connection postgres://postgres:password@database:5432/rollupsdb?sslmode=disable - --schema graphql - --host 0.0.0.0 - --port 10004 -# --append-plugins @graphile-contrib/pg-simplify-inflector - depends_on: - database: - condition: service_healthy - node: - condition: service_healthy - networks: - - devnet - ports: - - "10004:10004" # postgraphile - node: image: cartesi/rollups-node:devel command: cartesi-rollups-node diff --git a/dev/migrate/main.go b/dev/migrate/main.go index fb6a76475..76415ec7e 100644 --- a/dev/migrate/main.go +++ b/dev/migrate/main.go @@ -10,7 +10,7 @@ import ( "time" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/repository/schema" + "github.com/cartesi/rollups-node/internal/repository/postgres/schema" ) func main() { diff --git a/dev/tools.go b/dev/tools.go index 11e9affde..84a45da94 100644 --- a/dev/tools.go +++ b/dev/tools.go @@ -7,4 +7,6 @@ package main import ( _ "github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen" + // Import the Jet CLI tool for code generation + _ "github.com/go-jet/jet/v2/cmd/jet" ) diff --git a/go.mod b/go.mod index b6ca83288..30fbb591c 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/cartesi/rollups-node go 1.23.2 require ( - github.com/ethereum/go-ethereum v1.14.11 + github.com/ethereum/go-ethereum v1.14.12 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 github.com/tyler-smith/go-bip32 v1.0.0 @@ -13,65 +13,75 @@ require ( require github.com/BurntSushi/toml v1.4.0 require ( - github.com/aws/aws-sdk-go-v2 v1.32.2 - github.com/aws/aws-sdk-go-v2/config v1.18.45 - github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 + github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + 
github.com/aws/aws-sdk-go-v2/service/kms v1.37.7 github.com/deepmap/oapi-codegen/v2 v2.1.0 + github.com/go-jet/jet/v2 v2.12.0 github.com/golang-migrate/migrate/v4 v4.18.1 github.com/jackc/pgx/v5 v5.7.1 - github.com/lmittmann/tint v1.0.5 + github.com/lmittmann/tint v1.0.6 github.com/oapi-codegen/runtime v1.1.1 - golang.org/x/sync v0.8.0 - golang.org/x/text v0.19.0 + golang.org/x/sync v0.10.0 + golang.org/x/text v0.21.0 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect - github.com/aws/smithy-go v1.22.0 // indirect - github.com/bits-and-blooms/bitset v1.14.3 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect - github.com/consensys/bavard v0.1.22 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect + github.com/consensys/bavard v0.1.24 // indirect github.com/consensys/gnark-crypto v0.14.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/deckarep/golang-set/v2 v2.7.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/ethereum/c-kzg-4844 v1.0.3 // indirect - github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/getkin/kin-openapi v0.123.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/holiman/uint256 v1.3.1 // indirect + github.com/holiman/uint256 v1.3.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect + github.com/jackc/pgerrcode 
v0.0.0-20220416144525-469b46aa5efa // indirect + github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgtype v1.14.4 // indirect + github.com/jackc/pgx/v4 v4.18.3 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/lib/pq v1.10.9 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-sqlite3 v1.14.24 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect @@ -88,11 +98,11 @@ require ( github.com/tklauser/numcpus v0.9.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/tools v0.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect diff --git a/go.sum b/go.sum index 4fd667e66..ddb36d8d7 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,8 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm 
v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= @@ -8,6 +11,8 @@ github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25Yn github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= @@ -15,51 +20,47 @@ github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjC github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= -github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod 
h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= -github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes= -github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= -github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= -github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 
h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.7 h1:dZmNIRtPUvtvUIIDVNpvtnJQ8N8Iqm7SQAxf18htZYw= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.7/go.mod h1:vj8PlfJH9mnGeIzd6uMLPi5VgiqzGG7AZoe1kf1uTXM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.14.3 h1:Gd2c8lSNf9pKXom5JtD7AaKO8o7fGQ2LtFj1436qilA= -github.com/bits-and-blooms/bitset v1.14.3/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= -github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= @@ -72,22 +73,25 @@ github.com/cockroachdb/redact v1.1.5 
h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A= -github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= +github.com/consensys/bavard v0.1.24 h1:Lfe+bjYbpaoT7K5JTFoMi5wo9V4REGLvQQbHmatoN2I= +github.com/consensys/bavard v0.1.24/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= -github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= github.com/crate-crypto/go-kzg-4844 
v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= -github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.7.0 h1:gIloKvD7yH2oip4VLhsv3JyLLFnC0Y2mlusgcvJYW5k= +github.com/deckarep/golang-set/v2 v2.7.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= @@ -106,20 +110,22 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= -github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= -github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A= -github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= +github.com/ethereum/go-ethereum v1.14.12 h1:8hl57x77HSUo+cXExrURjU/w1VhL+ShCTJrTwcCQSe4= +github.com/ethereum/go-ethereum 
v1.14.12/go.mod h1:RAC2gVMWJ6FkxSPESfbshrcKpIokgQKsVKmAuqdekDY= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8= github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-jet/jet/v2 v2.12.0 h1:z2JfvBAZgsfxlQz6NXBYdZTXc7ep3jhbszTLtETv1JE= +github.com/go-jet/jet/v2 v2.12.0/go.mod h1:ufQVRQeI1mbcO5R8uCEVcVf3Foej9kReBdwDx7YMWUM= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -131,14 +137,19 @@ 
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -146,11 +157,11 @@ github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -167,53 +178,116 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= -github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod 
h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock 
v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod 
h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= 
+github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod 
h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lmittmann/tint v1.0.5 h1:NQclAutOfYsqs2F1Lenue6OoWCajs5wJcP3DfWVpePw= -github.com/lmittmann/tint v1.0.5/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= +github.com/lmittmann/tint v1.0.6 h1:vkkuDAZXc0EFGNzYjWcV0h7eEX+uujH48f/ifSkJWgc= +github.com/lmittmann/tint v1.0.6/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -241,6 +315,7 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -255,27 +330,46 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod 
h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/cobra v1.8.1 
h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= -github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= -github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= @@ -296,8 +390,10 @@ github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= @@ -306,50 +402,136 @@ go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2 go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod 
h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= 
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= diff --git a/internal/advancer/advancer.go b/internal/advancer/advancer.go index 8d296cba4..b483c85ad 100644 --- a/internal/advancer/advancer.go +++ 
b/internal/advancer/advancer.go @@ -7,18 +7,15 @@ import ( "context" "errors" "fmt" - "log/slog" "net/http" "time" "github.com/cartesi/rollups-node/internal/advancer/machines" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/services" - "github.com/cartesi/rollups-node/internal/inspect" . "github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/nodemachine" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/rollupsmachine/cartesimachine" "github.com/cartesi/rollups-node/pkg/service" ) @@ -32,23 +29,22 @@ var ( ) type IAdvancerRepository interface { - // Only needs Id, Index, and RawData fields from the retrieved Inputs. - GetUnprocessedInputs(_ context.Context, apps []Address) (map[Address][]*Input, error) - StoreAdvanceResult(context.Context, *Input, *nodemachine.AdvanceResult) error - UpdateClosedEpochs(_ context.Context, app Address) error + ListInputs(ctx context.Context, nameOrAddress string, f repository.InputFilter, p repository.Pagination) ([]*Input, error) + StoreAdvanceResult(ctx context.Context, appID int64, ar *AdvanceResult) error + UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) error } type IAdvancerMachines interface { - GetAdvanceMachine(app Address) (machines.AdvanceMachine, bool) + GetAdvanceMachine(appId int64) (machines.AdvanceMachine, bool) UpdateMachines(ctx context.Context) error - Apps() []Address + Apps() []*Application } type Service struct { service.Service repository IAdvancerRepository machines IAdvancerMachines - inspector inspect.Inspector + inspector *inspect.Inspector HTTPServer *http.Server HTTPServerFunc func() error } @@ -58,7 +54,7 @@ type CreateInfo struct { AdvancerPollingInterval time.Duration PostgresEndpoint config.Redacted[string] PostgresSslMode bool - Repository *repository.Database + Repository 
repository.Repository MachineServerVerbosity config.Redacted[cartesimachine.ServerVerbosity] Machines *machines.Machines MaxStartupTime time.Duration @@ -86,7 +82,7 @@ func Create(c *CreateInfo, s *Service) error { return service.WithTimeout(c.MaxStartupTime, func() error { if s.repository == nil { if c.Repository == nil { - c.Repository, err = repository.Connect(s.Context, c.PostgresEndpoint.Value, s.Logger) + c.Repository, err = factory.NewRepositoryFromConnectionString(s.Context, c.PostgresEndpoint.Value) if err != nil { return err } @@ -107,20 +103,13 @@ func Create(c *CreateInfo, s *Service) error { // allow partial construction for testing if c.Machines != nil { - logger := service.NewLogger(slog.Level(c.LogLevel), c.LogPretty) - logger = logger.With("service", "inspect") - s.inspector = inspect.Inspector{ - IInspectMachines: c.Machines, - Logger: logger, - ServeMux: http.NewServeMux(), - } - - s.inspector.ServeMux.Handle("/inspect/{dapp}", - services.CorsMiddleware(http.Handler(&s.inspector))) - s.inspector.ServeMux.Handle("/inspect/{dapp}/{payload}", - services.CorsMiddleware(http.Handler(&s.inspector))) - s.HTTPServer, s.HTTPServerFunc = s.inspector.CreateInspectServer( - c.InspectAddress, 3, 5*time.Second, s.inspector.ServeMux) + s.inspector, s.HTTPServer, s.HTTPServerFunc = inspect.NewInspector( + c.Repository, + c.Machines, + c.InspectAddress, + c.LogLevel, + c.LogPretty, + ) go s.HTTPServerFunc() } return nil @@ -148,6 +137,11 @@ func (v *Service) String() string { return v.Name } +func getUnprocessedInputs(ctx context.Context, mr IAdvancerRepository, appAddress string) ([]*Input, error) { + f := repository.InputFilter{Status: Pointer(InputCompletionStatus_None)} + return mr.ListInputs(ctx, appAddress, f, repository.Pagination{}) +} + // Step steps the Advancer for one processing cycle. 
// It gets unprocessed inputs from the repository, // runs them through the cartesi machine, @@ -161,25 +155,24 @@ func (advancer *Service) Step(ctx context.Context) error { apps := advancer.machines.Apps() - // Gets the unprocessed inputs (of all apps) from the repository. - advancer.Logger.Debug("querying for unprocessed inputs") - inputs, err := advancer.repository.GetUnprocessedInputs(ctx, apps) - if err != nil { - return err - } + // Updates the status of the epochs. + for _, app := range apps { + // Gets the unprocessed inputs (of all apps) from the repository. + advancer.Logger.Debug("querying for unprocessed inputs") - // Processes each set of inputs. - for app, inputs := range inputs { - advancer.Logger.Debug(fmt.Sprintf("processing %d input(s) from %v", len(inputs), app)) - err := advancer.process(ctx, app, inputs) + inputs, err := getUnprocessedInputs(ctx, advancer.repository, app.IApplicationAddress.String()) if err != nil { return err } - } - // Updates the status of the epochs. - for _, app := range apps { - err := advancer.repository.UpdateClosedEpochs(ctx, app) + // Processes each set of inputs. + advancer.Logger.Debug(fmt.Sprintf("processing %d input(s) from %s", len(inputs), app.Name)) + err = advancer.process(ctx, app, inputs) + if err != nil { + return err + } + + err = advancer.repository.UpdateEpochsInputsProcessed(ctx, app.IApplicationAddress.String()) if err != nil { return err } @@ -189,21 +182,20 @@ func (advancer *Service) Step(ctx context.Context) error { } // process sequentially processes inputs from the the application. -func (advancer *Service) process(ctx context.Context, app Address, inputs []*Input) error { +func (advancer *Service) process(ctx context.Context, app *Application, inputs []*Input) error { // Asserts that the app has an associated machine. 
- machine, exists := advancer.machines.GetAdvanceMachine(app) + machine, exists := advancer.machines.GetAdvanceMachine(app.ID) if !exists { - panic(fmt.Errorf("%w %s", ErrNoApp, app.String())) + return fmt.Errorf("%w %d", ErrNoApp, app.ID) } - // Asserts that there are inputs to process. if len(inputs) <= 0 { - panic(ErrNoInputs) + return nil } // FIXME if theres a change in epoch id call update epochs for _, input := range inputs { - advancer.Logger.Info("Processing input", "app", app, "id", input.Id, "index", input.Index) + advancer.Logger.Info("Processing input", "application", app.Name, "index", input.Index) // Sends the input to the cartesi machine. res, err := machine.Advance(ctx, input.RawData, input.Index) @@ -212,7 +204,7 @@ func (advancer *Service) process(ctx context.Context, app Address, inputs []*Inp } // Stores the result in the database. - err = advancer.repository.StoreAdvanceResult(ctx, input, res) + err = advancer.repository.StoreAdvanceResult(ctx, input.EpochApplicationID, res) if err != nil { return err } diff --git a/internal/advancer/advancer_test.go b/internal/advancer/advancer_test.go index 7f12cbc94..62a478a6f 100644 --- a/internal/advancer/advancer_test.go +++ b/internal/advancer/advancer_test.go @@ -15,8 +15,9 @@ import ( "github.com/cartesi/rollups-node/internal/advancer/machines" . 
"github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/nodemachine" + "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/suite" ) @@ -46,22 +47,22 @@ func (s *AdvancerSuite) TestRun() { require := s.Require() machines := newMockMachines() - app1 := randomAddress() - machines.Map[app1] = &MockMachine{} - app2 := randomAddress() - machines.Map[app2] = &MockMachine{} - res1 := randomAdvanceResult() - res2 := randomAdvanceResult() - res3 := randomAdvanceResult() + app1 := newMockMachine(1) + app2 := newMockMachine(2) + machines.Map[1] = *app1 + machines.Map[2] = *app2 + res1 := randomAdvanceResult(1) + res2 := randomAdvanceResult(2) + res3 := randomAdvanceResult(3) repository := &MockRepository{ - GetInputsReturn: map[Address][]*Input{ - app1: { - {Id: 1, RawData: marshal(res1)}, - {Id: 2, RawData: marshal(res2)}, + GetInputsReturn: map[common.Address][]*Input{ + app1.Application.IApplicationAddress: { + newInput(app1.Application.ID, 0, 0, marshal(res1)), + newInput(app1.Application.ID, 0, 1, marshal(res2)), }, - app2: { - {Id: 5, RawData: marshal(res3)}, + app2.Application.IApplicationAddress: { + newInput(app2.Application.ID, 0, 0, marshal(res3)), }, }, } @@ -84,16 +85,16 @@ func (s *AdvancerSuite) TestRun() { } func (s *AdvancerSuite) TestProcess() { - setup := func() (IAdvancerMachines, *MockRepository, *Service, Address) { + setup := func() (IAdvancerMachines, *MockRepository, *Service, *MockMachine) { require := s.Require() - app := randomAddress() machines := newMockMachines() - machines.Map[app] = &MockMachine{} + app1 := newMockMachine(1) + machines.Map[1] = *app1 repository := &MockRepository{} advancer, err := newMock(machines, repository) require.Nil(err) - return machines, repository, advancer, app + return machines, repository, advancer, app1 } s.Run("Ok", func() { @@ -101,58 +102,56 @@ func (s 
*AdvancerSuite) TestProcess() { _, repository, advancer, app := setup() inputs := []*Input{ - {Id: 1, RawData: marshal(randomAdvanceResult())}, - {Id: 2, RawData: marshal(randomAdvanceResult())}, - {Id: 3, RawData: marshal(randomAdvanceResult())}, - {Id: 4, RawData: marshal(randomAdvanceResult())}, - {Id: 5, RawData: marshal(randomAdvanceResult())}, - {Id: 6, RawData: marshal(randomAdvanceResult())}, - {Id: 7, RawData: marshal(randomAdvanceResult())}, + newInput(app.Application.ID, 0, 0, marshal(randomAdvanceResult(0))), + newInput(app.Application.ID, 0, 1, marshal(randomAdvanceResult(1))), + newInput(app.Application.ID, 0, 2, marshal(randomAdvanceResult(2))), + newInput(app.Application.ID, 0, 3, marshal(randomAdvanceResult(3))), + newInput(app.Application.ID, 1, 4, marshal(randomAdvanceResult(4))), + newInput(app.Application.ID, 1, 5, marshal(randomAdvanceResult(5))), + newInput(app.Application.ID, 2, 6, marshal(randomAdvanceResult(6))), } - err := advancer.process(context.Background(), app, inputs) + err := advancer.process(context.Background(), app.Application, inputs) require.Nil(err) require.Len(repository.StoredResults, 7) }) - s.Run("Panic", func() { - s.Run("ErrApp", func() { + s.Run("Noop", func() { + s.Run("NoInputs", func() { require := s.Require() - invalidApp := randomAddress() - _, _, advancer, _ := setup() - inputs := randomInputs(3) + _, _, advancer, app := setup() + inputs := []*Input{} - expected := fmt.Sprintf("%v %v", ErrNoApp, invalidApp) - require.PanicsWithError(expected, func() { - _ = advancer.process(context.Background(), invalidApp, inputs) - }) + err := advancer.process(context.Background(), app.Application, inputs) + require.Nil(err) }) + }) - s.Run("ErrInputs", func() { + s.Run("Error", func() { + s.Run("ErrApp", func() { require := s.Require() - _, _, advancer, app := setup() - inputs := []*Input{} + invalidApp := Application{ID: 999} + _, _, advancer, _ := setup() + inputs := randomInputs(1, 0, 3) - 
require.PanicsWithValue(ErrNoInputs, func() { - _ = advancer.process(context.Background(), app, inputs) - }) + err := advancer.process(context.Background(), &invalidApp, inputs) + expected := fmt.Sprintf("%v %v", ErrNoApp, invalidApp.ID) + require.Errorf(err, expected) }) - }) - s.Run("Error", func() { s.Run("Advance", func() { require := s.Require() _, repository, advancer, app := setup() inputs := []*Input{ - {Id: 1, RawData: marshal(randomAdvanceResult())}, - {Id: 2, RawData: []byte("advance error")}, - {Id: 3, RawData: []byte("unreachable")}, + newInput(app.Application.ID, 0, 0, marshal(randomAdvanceResult(0))), + newInput(app.Application.ID, 0, 1, []byte("advance error")), + newInput(app.Application.ID, 0, 2, []byte("unreachable")), } - err := advancer.process(context.Background(), app, inputs) + err := advancer.process(context.Background(), app.Application, inputs) require.Errorf(err, "advance error") require.Len(repository.StoredResults, 1) }) @@ -162,12 +161,12 @@ func (s *AdvancerSuite) TestProcess() { _, repository, advancer, app := setup() inputs := []*Input{ - {Id: 1, RawData: marshal(randomAdvanceResult())}, - {Id: 2, RawData: []byte("unreachable")}, + newInput(app.Application.ID, 0, 0, marshal(randomAdvanceResult(0))), + newInput(app.Application.ID, 0, 1, []byte("unreachable")), } repository.StoreAdvanceError = errors.New("store-advance error") - err := advancer.process(context.Background(), app, inputs) + err := advancer.process(context.Background(), app.Application, inputs) require.Errorf(err, "store-advance error") require.Len(repository.StoredResults, 1) }) @@ -176,14 +175,16 @@ func (s *AdvancerSuite) TestProcess() { // ------------------------------------------------------------------------------------------------ -type MockMachine struct{} +type MockMachine struct { + Application *Application +} func (mock *MockMachine) Advance( _ context.Context, input []byte, _ uint64, -) (*nodemachine.AdvanceResult, error) { - var res 
nodemachine.AdvanceResult +) (*AdvanceResult, error) { + var res AdvanceResult err := json.Unmarshal(input, &res) if err != nil { return nil, errors.New(string(input)) @@ -191,80 +192,98 @@ func (mock *MockMachine) Advance( return &res, nil } +func newMockMachine(id int64) *MockMachine { + return &MockMachine{ + Application: &Application{ + ID: id, + IApplicationAddress: randomAddress(), + }, + } +} + // ------------------------------------------------------------------------------------------------ type MachinesMock struct { - Map map[Address]machines.AdvanceMachine + Map map[int64]MockMachine } func newMockMachines() *MachinesMock { return &MachinesMock{ - Map: map[Address]machines.AdvanceMachine{}, + Map: map[int64]MockMachine{}, } } -func (mock *MachinesMock) GetAdvanceMachine(app Address) (machines.AdvanceMachine, bool) { - machine, exists := mock.Map[app] - return machine, exists +func (mock *MachinesMock) GetAdvanceMachine(appID int64) (machines.AdvanceMachine, bool) { + machine, exists := mock.Map[appID] + return &machine, exists } -func (m *MachinesMock) UpdateMachines(ctx context.Context) error { +func (mock *MachinesMock) UpdateMachines(ctx context.Context) error { return nil // FIXME } -func (mock *MachinesMock) Apps() []Address { - return []Address{} +func (mock *MachinesMock) Apps() []*Application { + keys := make([]*Application, len(mock.Map)) + i := 0 + for _, v := range mock.Map { + keys[i] = v.Application + i++ + } + return keys } // ------------------------------------------------------------------------------------------------ type MockRepository struct { - GetInputsReturn map[Address][]*Input + GetInputsReturn map[common.Address][]*Input GetInputsError error StoreAdvanceError error UpdateEpochsError error - StoredResults []*nodemachine.AdvanceResult + StoredResults []*AdvanceResult } -func (mock *MockRepository) GetUnprocessedInputs( - _ context.Context, - appAddresses []Address, -) (map[Address][]*Input, error) { - return mock.GetInputsReturn, 
mock.GetInputsError +func (mock *MockRepository) ListInputs( + ctx context.Context, + nameOrAddress string, + f repository.InputFilter, + p repository.Pagination, +) ([]*Input, error) { + address := common.HexToAddress(nameOrAddress) + return mock.GetInputsReturn[address], mock.GetInputsError } func (mock *MockRepository) StoreAdvanceResult( _ context.Context, - input *Input, - res *nodemachine.AdvanceResult, + appID int64, + res *AdvanceResult, ) error { mock.StoredResults = append(mock.StoredResults, res) return mock.StoreAdvanceError } -func (mock *MockRepository) UpdateClosedEpochs(_ context.Context, _ Address) error { +func (mock *MockRepository) UpdateEpochsInputsProcessed(_ context.Context, nameOrAddress string) error { return mock.UpdateEpochsError } // ------------------------------------------------------------------------------------------------ -func randomAddress() Address { +func randomAddress() common.Address { address := make([]byte, 20) _, err := crand.Read(address) if err != nil { panic(err) } - return Address(address) + return common.BytesToAddress(address) } -func randomHash() Hash { +func randomHash() common.Hash { hash := make([]byte, 32) _, err := crand.Read(hash) if err != nil { panic(err) } - return Hash(hash) + return common.BytesToHash(hash) } func randomBytes() []byte { @@ -286,28 +305,38 @@ func randomSliceOfBytes() [][]byte { return slice } -func randomInputs(size int) []*Input { +func newInput(appId int64, epochIndex uint64, inputIndex uint64, data []byte) *Input { + return &Input{ + EpochApplicationID: appId, + EpochIndex: epochIndex, + Index: inputIndex, + RawData: data, + } +} + +func randomInputs(appId int64, epochIndex uint64, size int) []*Input { slice := make([]*Input, size) for i := 0; i < size; i++ { - slice[i] = &Input{Id: uint64(i), RawData: randomBytes()} + slice[i] = newInput(appId, epochIndex, uint64(i), randomBytes()) } return slice } -func randomAdvanceResult() *nodemachine.AdvanceResult { - res := 
&nodemachine.AdvanceResult{ - Status: InputStatusAccepted, +func randomAdvanceResult(inputIndex uint64) *AdvanceResult { + hash := randomHash() + res := &AdvanceResult{ + InputIndex: inputIndex, + Status: InputCompletionStatus_Accepted, Outputs: randomSliceOfBytes(), Reports: randomSliceOfBytes(), OutputsHash: randomHash(), - MachineHash: new(Hash), + MachineHash: &hash, } - *res.MachineHash = randomHash() return res } -func marshal(res *nodemachine.AdvanceResult) []byte { +func marshal(res *AdvanceResult) []byte { data, err := json.Marshal(*res) if err != nil { panic(err) diff --git a/internal/advancer/machines/machines.go b/internal/advancer/machines/machines.go index af0f552d9..9c21e9180 100644 --- a/internal/advancer/machines/machines.go +++ b/internal/advancer/machines/machines.go @@ -12,6 +12,7 @@ import ( "sync" . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" nm "github.com/cartesi/rollups-node/internal/nodemachine" "github.com/cartesi/rollups-node/pkg/emulator" @@ -19,35 +20,40 @@ import ( cm "github.com/cartesi/rollups-node/pkg/rollupsmachine/cartesimachine" ) -type Repository interface { +type MachinesRepository interface { // GetMachineConfigurations retrieves a machine configuration for each application. - GetMachineConfigurations(context.Context) ([]*MachineConfig, error) + ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination) ([]*Application, error) // GetProcessedInputs retrieves the processed inputs of an application with indexes greater or // equal to the given input index. - GetProcessedInputs(_ context.Context, app Address, index uint64) ([]*Input, error) + ListInputs(ctx context.Context, nameOrAddress string, f repository.InputFilter, p repository.Pagination) ([]*Input, error) } // AdvanceMachine masks nodemachine.NodeMachine to only expose methods required by the Advancer. 
type AdvanceMachine interface { - Advance(_ context.Context, input []byte, index uint64) (*nm.AdvanceResult, error) + Advance(_ context.Context, input []byte, index uint64) (*AdvanceResult, error) } // InspectMachine masks nodemachine.NodeMachine to only expose methods required by the Inspector. type InspectMachine interface { - Inspect(_ context.Context, query []byte) (*nm.InspectResult, error) + Inspect(_ context.Context, query []byte) (*InspectResult, error) } // Machines is a thread-safe type that manages the pool of cartesi machines being used by the node. // It contains a map of applications to machines. type Machines struct { mutex sync.RWMutex - machines map[Address]*nm.NodeMachine - repository Repository + machines map[int64]*nm.NodeMachine + repository MachinesRepository verbosity cm.ServerVerbosity Logger *slog.Logger } +func getAllRunningApplications(ctx context.Context, mr MachinesRepository) ([]*Application, error) { + f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled)} + return mr.ListApplications(ctx, f, repository.Pagination{}) +} + // Load initializes the cartesi machines. // Load advances a machine to the last processed input stored in the database. // @@ -55,36 +61,36 @@ type Machines struct { // It stores the error to be returned later and continues to initialize the other machines. func Load( ctx context.Context, - repo Repository, + repo MachinesRepository, verbosity cm.ServerVerbosity, logger *slog.Logger, ) (*Machines, error) { - configs, err := repo.GetMachineConfigurations(ctx) + apps, err := getAllRunningApplications(ctx, repo) if err != nil { return nil, err } - machines := map[Address]*nm.NodeMachine{} + machines := map[int64]*nm.NodeMachine{} var errs error - for _, config := range configs { + for _, app := range apps { // Creates the machine. 
- machine, err := createMachine(ctx, verbosity, config, logger) + machine, err := createMachine(ctx, verbosity, app, logger) if err != nil { - err = fmt.Errorf("failed to create machine from snapshot (%v): %w", config, err) + err = fmt.Errorf("failed to create machine from snapshot %s (%s): %w", app.TemplateURI, app.Name, err) errs = errors.Join(errs, err) continue } // Advances the machine until it catches up with the state of the database (if necessary). - err = catchUp(ctx, repo, config.AppAddress, machine, config.ProcessedInputs, logger) + err = catchUp(ctx, repo, app, machine, logger) if err != nil { - err = fmt.Errorf("failed to advance cartesi machine (%v): %w", config, err) + err = fmt.Errorf("failed to advance cartesi machine (%v): %w", app, err) errs = errors.Join(errs, err, machine.Close()) continue } - machines[config.AppAddress] = machine + machines[app.ID] = machine } return &Machines{ @@ -96,109 +102,109 @@ func Load( } func (m *Machines) UpdateMachines(ctx context.Context) error { - configs, err := m.repository.GetMachineConfigurations(ctx) + apps, err := getAllRunningApplications(ctx, m.repository) if err != nil { return err } - for _, config := range configs { - if m.Exists(config.AppAddress) { + for _, app := range apps { + if m.Exists(app.ID) { continue } - machine, err := createMachine(ctx, m.verbosity, config, m.Logger) + machine, err := createMachine(ctx, m.verbosity, app, m.Logger) if err != nil { - m.Logger.Error("Failed to create machine", "app", config.AppAddress, "error", err) + m.Logger.Error("Failed to create machine", "application", app.IApplicationAddress, "error", err) continue } - err = catchUp(ctx, m.repository, config.AppAddress, machine, config.ProcessedInputs, m.Logger) + err = catchUp(ctx, m.repository, app, machine, m.Logger) if err != nil { - m.Logger.Error("Failed to sync the machine", "app", config.AppAddress, "error", err) + m.Logger.Error("Failed to sync the machine", "application", app.IApplicationAddress, "error", err) 
machine.Close() continue } - m.Add(config.AppAddress, machine) + m.Add(app.ID, machine) } - m.RemoveAbsent(configs) + m.RemoveAbsent(apps) return nil } // GetAdvanceMachine gets the machine associated with the application from the map. -func (m *Machines) GetAdvanceMachine(app Address) (AdvanceMachine, bool) { - return m.getMachine(app) +func (m *Machines) GetAdvanceMachine(appId int64) (AdvanceMachine, bool) { + return m.getMachine(appId) } // GetInspectMachine gets the machine associated with the application from the map. -func (m *Machines) GetInspectMachine(app Address) (InspectMachine, bool) { - return m.getMachine(app) +func (m *Machines) GetInspectMachine(appId int64) (InspectMachine, bool) { + return m.getMachine(appId) } // Add maps a new application to a machine. // It does nothing if the application is already mapped to some machine. // It returns true if it was able to add the machine and false otherwise. -func (m *Machines) Add(app Address, machine *nm.NodeMachine) bool { +func (m *Machines) Add(appId int64, machine *nm.NodeMachine) bool { m.mutex.Lock() defer m.mutex.Unlock() - if _, ok := m.machines[app]; ok { + if _, ok := m.machines[appId]; ok { return false } else { - m.machines[app] = machine + m.machines[appId] = machine return true } } -func (m *Machines) Exists(app Address) bool { +func (m *Machines) Exists(appId int64) bool { m.mutex.Lock() defer m.mutex.Unlock() - _, exists := m.machines[app] + _, exists := m.machines[appId] return exists } -func (m *Machines) RemoveAbsent(configs []*MachineConfig) { +func (m *Machines) RemoveAbsent(apps []*Application) { m.mutex.Lock() defer m.mutex.Unlock() - configMap := make(map[Address]bool) - for _, config := range configs { - configMap[config.AppAddress] = true + configMap := make(map[int64]bool) + for _, app := range apps { + configMap[app.ID] = true } - for address, machine := range m.machines { - if !configMap[address] { - m.Logger.Info("Application was disabled, shutting down machine", 
"application", address) + for id, machine := range m.machines { + if !configMap[id] { + m.Logger.Info("Application was disabled, shutting down machine", "application", machine.Application.Name) machine.Close() - delete(m.machines, address) + delete(m.machines, id) } } } // Delete deletes an application from the map. // It returns the associated machine, if any. -func (m *Machines) Delete(app Address) *nm.NodeMachine { +func (m *Machines) Delete(appId int64) *nm.NodeMachine { m.mutex.Lock() defer m.mutex.Unlock() - if machine, ok := m.machines[app]; ok { + if machine, ok := m.machines[appId]; ok { return nil } else { - delete(m.machines, app) + delete(m.machines, appId) return machine } } // Apps returns the addresses of the applications for which there are machines. -func (m *Machines) Apps() []Address { +func (m *Machines) Apps() []*Application { m.mutex.RLock() defer m.mutex.RUnlock() - keys := make([]Address, len(m.machines)) + keys := make([]*Application, len(m.machines)) i := 0 - for k := range m.machines { - keys[i] = k + for _, v := range m.machines { + keys[i] = v.Application i++ } return keys @@ -218,14 +224,14 @@ func (m *Machines) Close() error { // ------------------------------------------------------------------------------------------------ -func (m *Machines) getMachine(app Address) (*nm.NodeMachine, bool) { +func (m *Machines) getMachine(appId int64) (*nm.NodeMachine, bool) { m.mutex.RLock() defer m.mutex.RUnlock() - machine, exists := m.machines[app] + machine, exists := m.machines[appId] return machine, exists } -func closeMachines(machines map[Address]*nm.NodeMachine) (err error) { +func closeMachines(machines map[int64]*nm.NodeMachine) (err error) { for _, machine := range machines { err = errors.Join(err, machine.Close()) } @@ -237,35 +243,36 @@ func closeMachines(machines map[Address]*nm.NodeMachine) (err error) { func createMachine(ctx context.Context, verbosity cm.ServerVerbosity, - config *MachineConfig, + app *Application, logger 
*slog.Logger, ) (*nm.NodeMachine, error) { - logger.Info("creating machine", "application", config.AppAddress, - "template-path", config.SnapshotPath) - logger.Debug("instantiating remote machine server", "application", config.AppAddress) + appAddress := app.IApplicationAddress.String() + logger.Info("creating machine", "application", app.Name, "address", appAddress, + "template-path", app.TemplateURI) + logger.Debug("instantiating remote machine server", "application", app.Name, "address", appAddress) // Starts the server. address, err := cm.StartServer(logger, verbosity, 0, os.Stdout, os.Stderr) if err != nil { return nil, err } - logger.Info("loading machine on server", "application", config.AppAddress, - "remote-machine", address, "template-path", config.SnapshotPath) + logger.Info("loading machine on server", "application", app.Name, "address", appAddress, + "remote-machine", address, "template-path", app.TemplateURI) // Creates a CartesiMachine from the snapshot. runtimeConfig := &emulator.MachineRuntimeConfig{} - cartesiMachine, err := cm.Load(ctx, config.SnapshotPath, address, runtimeConfig) + cartesiMachine, err := cm.Load(ctx, app.TemplateURI, address, runtimeConfig) if err != nil { return nil, errors.Join(err, cm.StopServer(address, logger)) } - logger.Debug("machine loaded on server", "application", config.AppAddress, - "remote-machine", address, "template-path", config.SnapshotPath) + logger.Debug("machine loaded on server", "application", app.Name, "address", appAddress, + "remote-machine", address, "template-path", app.TemplateURI) // Creates a RollupsMachine from the CartesiMachine. rollupsMachine, err := rm.New(ctx, cartesiMachine, - config.AdvanceIncCycles, - config.AdvanceMaxCycles, + app.ExecutionParameters.AdvanceIncCycles, + app.ExecutionParameters.AdvanceMaxCycles, logger, ) if err != nil { @@ -273,11 +280,13 @@ func createMachine(ctx context.Context, } // Creates a NodeMachine from the RollupsMachine. 
- nodeMachine, err := nm.NewNodeMachine(rollupsMachine, - config.ProcessedInputs, - config.AdvanceMaxDeadline, - config.InspectMaxDeadline, - config.MaxConcurrentInspects) + nodeMachine, err := nm.NewNodeMachine( + app, + rollupsMachine, + 0, + app.ExecutionParameters.AdvanceMaxDeadline, + app.ExecutionParameters.InspectMaxDeadline, + app.ExecutionParameters.MaxConcurrentInspects) if err != nil { return nil, errors.Join(err, rollupsMachine.Close(ctx)) } @@ -285,25 +294,38 @@ func createMachine(ctx context.Context, return nodeMachine, err } +func getProcessedInputs(ctx context.Context, mr MachinesRepository, appAddress string, index uint64) ([]*Input, error) { + f := repository.InputFilter{InputIndex: Pointer(index), NotStatus: Pointer(InputCompletionStatus_None)} + return mr.ListInputs(ctx, appAddress, f, repository.Pagination{}) +} + func catchUp(ctx context.Context, - repo Repository, - app Address, + repo MachinesRepository, + app *Application, machine *nm.NodeMachine, - processedInputs uint64, logger *slog.Logger, ) error { + appAddress := app.IApplicationAddress.String() + logger.Info("catching up processed inputs", + "application", app.Name, + "address", appAddress, + "processed_inputs", app.ProcessedInputs, + ) - logger.Info("catching up unprocessed inputs", "app", app) - - inputs, err := repo.GetProcessedInputs(ctx, app, processedInputs) + inputs, err := getProcessedInputs(ctx, repo, appAddress, 0) if err != nil { return err } + if uint64(len(inputs)) != app.ProcessedInputs { + errorMsg := fmt.Sprintf("processed inputs do not match: expected %d, got %d", len(inputs), app.ProcessedInputs) + logger.Error(errorMsg, "application", app.Name, "address", appAddress) + return errors.New(errorMsg) + } + for _, input := range inputs { - // FIXME epoch id to epoch index - logger.Info("advancing", "app", app, "epochId", input.EpochId, - "input_index", input.Index) + logger.Info("advancing", "application", app.Name, "address", appAddress, + "epochIndex", 
input.EpochIndex, "input_index", input.Index) _, err := machine.Advance(ctx, input.RawData, input.Index) if err != nil { return err diff --git a/internal/claimer/claimer.go b/internal/claimer/claimer.go index bb17115fb..c77094c26 100644 --- a/internal/claimer/claimer.go +++ b/internal/claimer/claimer.go @@ -43,7 +43,9 @@ import ( "time" "github.com/cartesi/rollups-node/internal/config" + . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/cartesi/rollups-node/pkg/service" @@ -58,11 +60,6 @@ var ( ErrMissingEvent = fmt.Errorf("accepted claim has no matching blockchain event") ) -type address = common.Address -type hash = common.Hash -type claimRow = repository.ClaimRow -type claimSubmissionEvent = iconsensus.IConsensusClaimSubmission - type CreateInfo struct { service.CreateInfo @@ -71,7 +68,7 @@ type CreateInfo struct { BlockchainHttpEndpoint config.Redacted[string] EthConn *ethclient.Client PostgresEndpoint config.Redacted[string] - Repository *repository.Database + Repository repository.Repository EnableSubmission bool MaxStartupTime time.Duration } @@ -80,10 +77,10 @@ type Service struct { service.Service submissionEnabled bool - Repository *repository.Database + Repository repository.Repository EthConn *ethclient.Client TxOpts *bind.TransactOpts - claimsInFlight map[address]hash // -> txHash + claimsInFlight map[common.Address]common.Hash // -> txHash } func (c *CreateInfo) LoadEnv() { @@ -120,17 +117,15 @@ func Create(c *CreateInfo, s *Service) error { } if s.Repository == nil { - if c.Repository == nil { - c.Repository, err = repository.Connect(s.Context, c.PostgresEndpoint.Value, s.Logger) - if err != nil { - return err - } + c.Repository, err = factory.NewRepositoryFromConnectionString(s.Context, c.PostgresEndpoint.Value) + if err != nil { + return err } s.Repository = 
c.Repository } if s.claimsInFlight == nil { - s.claimsInFlight = map[address]hash{} + s.claimsInFlight = map[common.Address]common.Hash{} } if s.submissionEnabled && s.TxOpts == nil { @@ -182,15 +177,15 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { continue } if claim, ok := currClaims[key]; ok { - err = se.updateEpochWithSubmittedClaim(&claim, receipt.TxHash) + err = se.updateEpochWithSubmittedClaim(claim, receipt.TxHash) if err != nil { errs = append(errs, err) return errs } s.Logger.Info("Claim submitted", - "app", claim.AppContractAddress, - "claim", claim.EpochHash, - "last_block", claim.EpochLastBlock, + "app", claim.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", claim.ClaimHash), + "last_block", claim.LastBlock, "tx", txHash) delete(currClaims, key) } else { @@ -203,15 +198,16 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { // check computed claims for key, currClaimRow := range currClaims { var ic *iconsensus.IConsensus = nil - var prevEvent *claimSubmissionEvent = nil - var currEvent *claimSubmissionEvent = nil + var prevEvent *iconsensus.IConsensusClaimSubmission = nil + var currEvent *iconsensus.IConsensusClaimSubmission = nil if _, isInFlight := s.claimsInFlight[key]; isInFlight { continue } - if prevClaimRow, ok := prevClaims[key]; ok { - err := checkClaimsConstraint(&prevClaimRow, &currClaimRow) + prevClaimRow, prevExists := prevClaims[key] + if prevExists { + err := checkClaimsConstraint(prevClaimRow, currClaimRow) if err != nil { s.Logger.Error("database mismatch", "prevClaim", prevClaimRow, @@ -225,7 +221,7 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { // if prevClaimRow exists, there must be a matching event ic, prevEvent, currEvent, err = - se.findClaimSubmissionEventAndSucc(&prevClaimRow) + se.findClaimSubmissionEventAndSucc(prevClaimRow) if err != nil { delete(currClaims, key) errs = append(errs, err) @@ -240,7 +236,7 @@ func (s *Service) 
submitClaimsAndUpdateDatabase(se sideEffects) []error { errs = append(errs, ErrMissingEvent) goto nextApp } - if !claimMatchesEvent(&prevClaimRow, prevEvent) { + if !claimMatchesEvent(prevClaimRow, prevEvent) { s.Logger.Error("event mismatch", "claim", prevClaimRow, "event", prevEvent, @@ -253,7 +249,7 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { } else { // first claim ic, currEvent, _, err = - se.findClaimSubmissionEventAndSucc(&currClaimRow) + se.findClaimSubmissionEventAndSucc(currClaimRow) if err != nil { delete(currClaims, key) errs = append(errs, err) @@ -262,7 +258,12 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { } if currEvent != nil { - if !claimMatchesEvent(&currClaimRow, currEvent) { + s.Logger.Debug("Found ClaimSubmitted Event", + "app", currEvent.AppContract, + "claim_hash", fmt.Sprintf("%x", currEvent.Claim), + "last_block", currEvent.LastProcessedBlockNumber.Uint64(), + ) + if !claimMatchesEvent(currClaimRow, currEvent) { s.Logger.Error("event mismatch", "claim", currClaimRow, "event", currEvent, @@ -272,46 +273,71 @@ func (s *Service) submitClaimsAndUpdateDatabase(se sideEffects) []error { errs = append(errs, ErrEventMismatch) goto nextApp } + s.Logger.Debug("Updating claim status to submitted", + "app", currClaimRow.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", currClaimRow.ClaimHash), + "last_block", currClaimRow.LastBlock, + ) txHash := currEvent.Raw.TxHash - err = se.updateEpochWithSubmittedClaim(&currClaimRow, txHash) + err = se.updateEpochWithSubmittedClaim(currClaimRow, txHash) if err != nil { delete(currClaims, key) errs = append(errs, err) goto nextApp } delete(s.claimsInFlight, key) + s.Logger.Info("Claim previously submitted", + "app", currClaimRow.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", currClaimRow.ClaimHash), + "last_block", currClaimRow.LastBlock, + ) } else if s.submissionEnabled { - txHash, err := se.submitClaimToBlockchain(ic, &currClaimRow) + 
if prevClaimRow != nil && prevClaimRow.Status != EpochStatus_ClaimAccepted { + s.Logger.Debug("Waiting previous claim to be accepted before submitting new one. Previous:", + "app", prevClaimRow.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", prevClaimRow.ClaimHash), + "last_block", prevClaimRow.LastBlock, + ) + goto nextApp + } + s.Logger.Debug("Submitting claim to blockchain", + "app", currClaimRow.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", currClaimRow.ClaimHash), + "last_block", currClaimRow.LastBlock, + ) + txHash, err := se.submitClaimToBlockchain(ic, currClaimRow) if err != nil { delete(currClaims, key) errs = append(errs, err) goto nextApp } - s.Logger.Info("Submitting claim to blockchain", - "app", currClaimRow.AppContractAddress, - "claim", currClaimRow.EpochHash, - "last_block", currClaimRow.EpochLastBlock, + s.claimsInFlight[key] = txHash + } else { + s.Logger.Debug("Claim submission disabled. Doing nothing", + "app", currClaimRow.IApplicationAddress, + "claim_hash", fmt.Sprintf("%x", currClaimRow.ClaimHash), + "last_block", currClaimRow.LastBlock, ) - s.claimsInFlight[currClaimRow.AppContractAddress] = txHash + } nextApp: } return errs } -func checkClaimConstraint(c *claimRow) error { - zeroAddress := address{} +func checkClaimConstraint(c *ClaimRow) error { + zeroAddress := common.Address{} - if c.EpochFirstBlock > c.EpochLastBlock { + if c.FirstBlock > c.LastBlock { return ErrClaimMismatch } - if c.AppIConsensusAddress == zeroAddress { + if c.IConsensusAddress == zeroAddress { return ErrClaimMismatch } return nil } -func checkClaimsConstraint(p *claimRow, c *claimRow) error { +func checkClaimsConstraint(p *ClaimRow, c *ClaimRow) error { var err error err = checkClaimConstraint(c) @@ -324,24 +350,25 @@ func checkClaimsConstraint(p *claimRow, c *claimRow) error { } // p, c consistent - if p.AppContractAddress != c.AppContractAddress { + if p.IApplicationAddress != c.IApplicationAddress { return ErrClaimMismatch } - if p.EpochLastBlock 
> c.EpochLastBlock { + if p.LastBlock > c.LastBlock { return ErrClaimMismatch } - if p.EpochFirstBlock > c.EpochFirstBlock { + if p.FirstBlock > c.FirstBlock { return ErrClaimMismatch } - if p.EpochIndex > c.EpochIndex { + if p.Index > c.Index { return ErrClaimMismatch } return nil } -func claimMatchesEvent(c *claimRow, e *claimSubmissionEvent) bool { - return c.AppContractAddress == e.AppContract && - c.EpochLastBlock == e.LastProcessedBlockNumber.Uint64() +func claimMatchesEvent(c *ClaimRow, e *iconsensus.IConsensusClaimSubmission) bool { + return c.IApplicationAddress == e.AppContract && + *c.ClaimHash == e.Claim && + c.LastBlock == e.LastProcessedBlockNumber.Uint64() } func (s *Service) Start(context context.Context, ready chan<- struct{}) error { diff --git a/internal/claimer/claimer_test.go b/internal/claimer/claimer_test.go index f64d114af..63e4833c0 100644 --- a/internal/claimer/claimer_test.go +++ b/internal/claimer/claimer_test.go @@ -9,13 +9,14 @@ import ( "os" "testing" + "github.com/cartesi/rollups-node/internal/model" + . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/cartesi/rollups-node/pkg/service" - "github.com/lmittmann/tint" "github.com/ethereum/go-ethereum/common" - . 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/lmittmann/tint" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -27,45 +28,45 @@ type serviceMock struct { } func (m *serviceMock) selectClaimPairsPerApp() ( - map[address]claimRow, - map[address]claimRow, + map[common.Address]*ClaimRow, + map[common.Address]*ClaimRow, error, ) { args := m.Called() - return args.Get(0).(map[address]claimRow), - args.Get(1).(map[address]claimRow), + return args.Get(0).(map[common.Address]*ClaimRow), + args.Get(1).(map[common.Address]*ClaimRow), args.Error(2) } func (m *serviceMock) updateEpochWithSubmittedClaim( - claim *claimRow, - txHash Hash, + claim *ClaimRow, + txHash common.Hash, ) error { args := m.Called(claim, txHash) return args.Error(0) } func (m *serviceMock) findClaimSubmissionEventAndSucc( - claim *claimRow, + claim *ClaimRow, ) ( *iconsensus.IConsensus, - *claimSubmissionEvent, - *claimSubmissionEvent, + *iconsensus.IConsensusClaimSubmission, + *iconsensus.IConsensusClaimSubmission, error, ) { args := m.Called(claim) return args.Get(0).(*iconsensus.IConsensus), - args.Get(1).(*claimSubmissionEvent), - args.Get(2).(*claimSubmissionEvent), + args.Get(1).(*iconsensus.IConsensusClaimSubmission), + args.Get(2).(*iconsensus.IConsensusClaimSubmission), args.Error(3) } func (m *serviceMock) submitClaimToBlockchain( instance *iconsensus.IConsensus, - claim *claimRow, -) (Hash, error) { + claim *ClaimRow, +) (common.Hash, error) { args := m.Called(nil, claim) - return args.Get(0).(Hash), args.Error(1) + return args.Get(0).(common.Hash), args.Error(1) } -func (m *serviceMock) pollTransaction(txHash Hash) (bool, *types.Receipt, error) { +func (m *serviceMock) pollTransaction(txHash common.Hash) (bool, *types.Receipt, error) { args := m.Called(txHash) return args.Bool(0), args.Get(1).(*types.Receipt), @@ -87,7 +88,7 @@ func newServiceMock() *serviceMock { Logger: slog.New(handler), }, submissionEnabled: 
true, - claimsInFlight: map[address]hash{}, + claimsInFlight: map[common.Address]common.Hash{}, }, } } @@ -97,8 +98,8 @@ func newServiceMock() *serviceMock { // ////////////////////////////////////////////////////////////////////////////// func TestDoNothing(t *testing.T) { m := newServiceMock() - prevClaims := map[address]claimRow{} - currClaims := map[address]claimRow{} + prevClaims := map[common.Address]*ClaimRow{} + currClaims := map[common.Address]*ClaimRow{} m.On("selectClaimPairsPerApp"). Return(prevClaims, currClaims, nil) @@ -111,19 +112,23 @@ func TestSubmitFirstClaim(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, + claimHash := common.HexToHash("0x100") + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, } - var prevEvent *claimSubmissionEvent = nil - var currEvent *claimSubmissionEvent = nil - prevClaims := map[address]claimRow{} - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + var prevEvent *iconsensus.IConsensusClaimSubmission = nil + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevClaims := map[common.Address]*ClaimRow{} + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). 
@@ -147,31 +152,41 @@ func TestSubmitClaimWithAntecessor(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, - } - - prevClaims := map[address]claimRow{ - appContractAddress: prevClaim, - } - var currEvent *claimSubmissionEvent = nil - prevEvent := &claimSubmissionEvent{ - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock), + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &prevClaimHash, + Status: model.EpochStatus_ClaimAccepted, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, + } + + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: &prevClaim, + } + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevEvent := &iconsensus.IConsensusClaimSubmission{ + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock), AppContract: appContractAddress, + Claim: *prevClaim.ClaimHash, } - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). 
@@ -196,19 +211,23 @@ func TestSkipSubmitFirstClaim(t *testing.T) { m.submissionEnabled = false appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, + claimHash := common.HexToHash("0x100") + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, } - var prevEvent *claimSubmissionEvent = nil - var currEvent *claimSubmissionEvent = nil - prevClaims := map[address]claimRow{} - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + var prevEvent *iconsensus.IConsensusClaimSubmission = nil + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevClaims := map[common.Address]*ClaimRow{} + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). 
@@ -233,31 +252,40 @@ func TestSkipSubmitClaimWithAntecessor(t *testing.T) { m.submissionEnabled = false appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, - } - - prevClaims := map[address]claimRow{ - appContractAddress: prevClaim, - } - var currEvent *claimSubmissionEvent = nil - prevEvent := &claimSubmissionEvent{ - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock), + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &prevClaimHash, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, + } + + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: &prevClaim, + } + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevEvent := &iconsensus.IConsensusClaimSubmission{ + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock), AppContract: appContractAddress, + Claim: *prevClaim.ClaimHash, } - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). 
@@ -281,17 +309,21 @@ func TestInFlightCompleted(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") reqHash := common.HexToHash("0x10") - txHash := common.HexToHash("0x100") - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - prevClaims := map[address]claimRow{} - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + claimHash := common.HexToHash("0x100") + txHash := common.HexToHash("0x1000") + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &claimHash, + }, + } + prevClaims := map[common.Address]*ClaimRow{} + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.claimsInFlight[appContractAddress] = reqHash @@ -318,22 +350,27 @@ func TestInFlightCompleted(t *testing.T) { func TestUpdateFirstClaim(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, + claimHash := common.HexToHash("0x100") + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, } - var nilEvent *claimSubmissionEvent = nil - currEvent := claimSubmissionEvent{ + var nilEvent *iconsensus.IConsensusClaimSubmission = nil + currEvent := iconsensus.IConsensusClaimSubmission{ AppContract: appContractAddress, - LastProcessedBlockNumber: new(big.Int).SetUint64(currClaim.EpochLastBlock), + LastProcessedBlockNumber: new(big.Int).SetUint64(currClaim.LastBlock), + Claim: *currClaim.ClaimHash, } - prevClaims := 
map[address]claimRow{} - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + prevClaims := map[common.Address]*ClaimRow{} + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). Return(prevClaims, currClaims, nil) @@ -355,34 +392,44 @@ func TestUpdateFirstClaim(t *testing.T) { func TestUpdateClaimWithAntecessor(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, - } - - prevEvent := claimSubmissionEvent{ + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &prevClaimHash, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, + } + + prevEvent := iconsensus.IConsensusClaimSubmission{ AppContract: appContractAddress, - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock), + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock), + Claim: *prevClaim.ClaimHash, } - currEvent := claimSubmissionEvent{ + currEvent := iconsensus.IConsensusClaimSubmission{ AppContract: appContractAddress, - LastProcessedBlockNumber: new(big.Int).SetUint64(currClaim.EpochLastBlock), + LastProcessedBlockNumber: new(big.Int).SetUint64(currClaim.LastBlock), + Claim: *currClaim.ClaimHash, } - prevClaims := map[address]claimRow{ - 
appContractAddress: prevClaim, + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: &prevClaim, } - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). Return(prevClaims, currClaims, nil) @@ -410,31 +457,40 @@ func TestSubmitClaimWithAntecessorMismatch(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, - } - - prevClaims := map[address]claimRow{ - appContractAddress: prevClaim, - } - var currEvent *claimSubmissionEvent = nil - prevEvent := &claimSubmissionEvent{ - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock + 1), + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &prevClaimHash, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, + } + + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: &prevClaim, + } + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevEvent := &iconsensus.IConsensusClaimSubmission{ + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock + 1), AppContract: appContractAddress, + Claim: *prevClaim.ClaimHash, } - currClaims := 
map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). @@ -453,34 +509,44 @@ func TestSubmitClaimWithAntecessorMismatch(t *testing.T) { func TestSubmitClaimWithEventMismatch(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 3, - EpochFirstBlock: 30, - EpochLastBlock: 39, - } - - prevEvent := claimSubmissionEvent{ + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &prevClaimHash, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 3, + FirstBlock: 30, + LastBlock: 39, + ClaimHash: &claimHash, + }, + } + + prevEvent := iconsensus.IConsensusClaimSubmission{ AppContract: appContractAddress, - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock), + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock), + Claim: *prevClaim.ClaimHash, } - currEvent := claimSubmissionEvent{ + currEvent := iconsensus.IConsensusClaimSubmission{ AppContract: appContractAddress, - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock + 1), + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock + 1), + Claim: *currClaim.ClaimHash, } - prevClaims := map[address]claimRow{ - appContractAddress: prevClaim, + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: 
&prevClaim, } - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). Return(prevClaims, currClaims, nil) @@ -499,31 +565,40 @@ func TestSubmitClaimWithAntecessorOutOfOrder(t *testing.T) { m := newServiceMock() appContractAddress := common.HexToAddress("0x01") claimTransactionHash := common.HexToHash("0x10") - prevClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 2, - EpochFirstBlock: 20, - EpochLastBlock: 29, - } - currClaim := claimRow{ - AppContractAddress: appContractAddress, - AppIConsensusAddress: appContractAddress, - EpochIndex: 1, - EpochFirstBlock: 10, - EpochLastBlock: 19, - } - - prevClaims := map[address]claimRow{ - appContractAddress: prevClaim, - } - var currEvent *claimSubmissionEvent = nil - prevEvent := &claimSubmissionEvent{ - LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.EpochLastBlock + 1), + claimHash := common.HexToHash("0x100") + prevClaimHash := common.HexToHash("0x101") + prevClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 2, + FirstBlock: 20, + LastBlock: 29, + ClaimHash: &prevClaimHash, + }, + } + currClaim := ClaimRow{ + IApplicationAddress: appContractAddress, + IConsensusAddress: appContractAddress, + Epoch: Epoch{ + Index: 1, + FirstBlock: 10, + LastBlock: 19, + ClaimHash: &claimHash, + }, + } + + prevClaims := map[common.Address]*ClaimRow{ + appContractAddress: &prevClaim, + } + var currEvent *iconsensus.IConsensusClaimSubmission = nil + prevEvent := &iconsensus.IConsensusClaimSubmission{ + LastProcessedBlockNumber: new(big.Int).SetUint64(prevClaim.LastBlock + 1), AppContract: appContractAddress, + Claim: *prevClaim.ClaimHash, } - currClaims := map[address]claimRow{ - appContractAddress: currClaim, + currClaims := map[common.Address]*ClaimRow{ + 
appContractAddress: &currClaim, } m.On("selectClaimPairsPerApp"). diff --git a/internal/claimer/side-effects.go b/internal/claimer/side-effects.go index 78987062a..c7118b1e1 100644 --- a/internal/claimer/side-effects.go +++ b/internal/claimer/side-effects.go @@ -7,40 +7,42 @@ import ( "fmt" "math/big" + . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) type sideEffects interface { // database selectClaimPairsPerApp() ( - map[address]claimRow, - map[address]claimRow, + map[common.Address]*ClaimRow, + map[common.Address]*ClaimRow, error, ) updateEpochWithSubmittedClaim( - claim *claimRow, - txHash hash, + claim *ClaimRow, + txHash common.Hash, ) error // blockchain findClaimSubmissionEventAndSucc( - claim *claimRow, + claim *ClaimRow, ) ( *iconsensus.IConsensus, - *claimSubmissionEvent, - *claimSubmissionEvent, + *iconsensus.IConsensusClaimSubmission, + *iconsensus.IConsensusClaimSubmission, error, ) submitClaimToBlockchain( ic *iconsensus.IConsensus, - claim *claimRow, + claim *ClaimRow, ) ( - hash, + common.Hash, error, ) - pollTransaction(txHash hash) ( + pollTransaction(txHash common.Hash) ( bool, *types.Receipt, error, @@ -48,8 +50,8 @@ type sideEffects interface { } func (s *Service) selectClaimPairsPerApp() ( - map[address]claimRow, - map[address]claimRow, + map[common.Address]*ClaimRow, + map[common.Address]*ClaimRow, error, ) { computed, accepted, err := s.Repository.SelectClaimPairsPerApp(s.Context) @@ -66,31 +68,33 @@ func (s *Service) selectClaimPairsPerApp() ( /* update the database epoch status to CLAIM_SUBMITTED and add a transaction hash */ func (s *Service) updateEpochWithSubmittedClaim( - claim *claimRow, - txHash hash, + claim *ClaimRow, + txHash common.Hash, ) error { - err := s.Repository.UpdateEpochWithSubmittedClaim(s.Context, claim.EpochID, 
txHash) + err := s.Repository.UpdateEpochWithSubmittedClaim(s.Context, claim.ApplicationID, claim.Index, txHash) if err != nil { s.Logger.Error("updateEpochWithSubmittedClaim:failed", - "appContractAddress", claim.AppContractAddress, - "hash", claim.EpochHash, + "appContractAddress", claim.IApplicationAddress, + "hash", claim.ClaimHash, + "last_block", claim.LastBlock, "txHash", txHash, "error", err) } else { s.Logger.Debug("updateEpochWithSubmittedClaim:success", - "appContractAddress", claim.AppContractAddress, - "hash", claim.EpochHash, + "appContractAddress", claim.IApplicationAddress, + "last_block", claim.LastBlock, + "hash", claim.ClaimHash, "txHash", txHash) } return err } func (s *Service) findClaimSubmissionEventAndSucc( - claim *claimRow, + claim *ClaimRow, ) ( *iconsensus.IConsensus, - *claimSubmissionEvent, - *claimSubmissionEvent, + *iconsensus.IConsensusClaimSubmission, + *iconsensus.IConsensusClaimSubmission, error, ) { ic, curr, next, err := s.FindClaimSubmissionEventAndSucc(claim) @@ -110,28 +114,30 @@ func (s *Service) findClaimSubmissionEventAndSucc( func (s *Service) submitClaimToBlockchain( ic *iconsensus.IConsensus, - claim *claimRow, -) (hash, error) { - txHash := hash{} - lastBlockNumber := new(big.Int).SetUint64(claim.EpochLastBlock) - tx, err := ic.SubmitClaim(s.TxOpts, claim.AppContractAddress, - lastBlockNumber, claim.EpochHash) + claim *ClaimRow, +) (common.Hash, error) { + txHash := common.Hash{} + lastBlockNumber := new(big.Int).SetUint64(claim.LastBlock) + tx, err := ic.SubmitClaim(s.TxOpts, claim.IApplicationAddress, + lastBlockNumber, *claim.ClaimHash) if err != nil { s.Logger.Error("submitClaimToBlockchain:failed", - "appContractAddress", claim.AppContractAddress, - "claimHash", claim.EpochHash, + "appContractAddress", claim.IApplicationAddress, + "claimHash", *claim.ClaimHash, + "last_block", claim.LastBlock, "error", err) } else { txHash = tx.Hash() s.Logger.Debug("submitClaimToBlockchain:success", - "appContractAddress", 
claim.AppContractAddress, - "claimHash", claim.EpochHash, + "appContractAddress", claim.IApplicationAddress, + "claimHash", *claim.ClaimHash, + "last_block", claim.LastBlock, "TxHash", txHash) } return txHash, err } -func (s *Service) pollTransaction(txHash hash) (bool, *types.Receipt, error) { +func (s *Service) pollTransaction(txHash common.Hash) (bool, *types.Receipt, error) { ready, receipt, err := s.PollTransaction(txHash) if err != nil { s.Logger.Error("PollTransaction:failed", @@ -153,22 +159,22 @@ func (s *Service) pollTransaction(txHash hash) (bool, *types.Receipt, error) { // scan the event stream for a claimSubmission event that matches claim. // return this event and its successor func (s *Service) FindClaimSubmissionEventAndSucc( - claim *claimRow, + claim *ClaimRow, ) ( *iconsensus.IConsensus, - *claimSubmissionEvent, - *claimSubmissionEvent, + *iconsensus.IConsensusClaimSubmission, + *iconsensus.IConsensusClaimSubmission, error, ) { - ic, err := iconsensus.NewIConsensus(claim.AppIConsensusAddress, s.EthConn) + ic, err := iconsensus.NewIConsensus(claim.IConsensusAddress, s.EthConn) if err != nil { return nil, nil, nil, err } it, err := ic.FilterClaimSubmission(&bind.FilterOpts{ Context: s.Context, - Start: claim.EpochLastBlock, - }, nil, nil) + Start: claim.LastBlock, + }, nil, []common.Address{claim.IApplicationAddress}) if err != nil { return nil, nil, nil, err } @@ -177,7 +183,7 @@ func (s *Service) FindClaimSubmissionEventAndSucc( event := it.Event lastBlock := event.LastProcessedBlockNumber.Uint64() if claimMatchesEvent(claim, event) { - var succ *claimSubmissionEvent = nil + var succ *iconsensus.IConsensusClaimSubmission = nil if it.Next() { succ = it.Event } @@ -185,7 +191,7 @@ func (s *Service) FindClaimSubmissionEventAndSucc( return nil, nil, nil, it.Error() } return ic, event, succ, nil - } else if lastBlock > claim.EpochLastBlock { + } else if lastBlock > claim.LastBlock { err = fmt.Errorf("claim not found, searched up to %v", event) } } @@ 
-196,7 +202,7 @@ func (s *Service) FindClaimSubmissionEventAndSucc( } /* poll a transaction hash for its submission status and receipt */ -func (s *Service) PollTransaction(txHash hash) (bool, *types.Receipt, error) { +func (s *Service) PollTransaction(txHash common.Hash) (bool, *types.Receipt, error) { _, isPending, err := s.EthConn.TransactionByHash(s.Context, txHash) if err != nil || isPending { return false, nil, err diff --git a/internal/config/generate/Config.toml b/internal/config/generate/Config.toml index f17639a17..9f20a8b40 100644 --- a/internal/config/generate/Config.toml +++ b/internal/config/generate/Config.toml @@ -19,6 +19,12 @@ If set to true, the node will add colors to its log output.""" # Features # +[features.CARTESI_FEATURE_INPUT_READER_ENABLED] +default = "true" +go-type = "bool" +description = """ +If set to false, the node will not read inputs from the blockchain.""" + [features.CARTESI_FEATURE_CLAIM_SUBMISSION_ENABLED] default = "true" go-type = "bool" diff --git a/internal/config/generate/code.go b/internal/config/generate/code.go index d645412c6..e9557ce38 100644 --- a/internal/config/generate/code.go +++ b/internal/config/generate/code.go @@ -130,10 +130,10 @@ func ToLogLevelFromString(s string) (LogLevel, error) { func ToDefaultBlockFromString(s string) (DefaultBlock,error){ var m = map[string]DefaultBlock{ - "latest" : model.DefaultBlockStatusLatest, - "pending" : model.DefaultBlockStatusPending, - "safe" : model.DefaultBlockStatusSafe, - "finalized": model.DefaultBlockStatusFinalized, + "latest" : model.DefaultBlock_Latest, + "pending" : model.DefaultBlock_Pending, + "safe" : model.DefaultBlock_Safe, + "finalized": model.DefaultBlock_Finalized, } if v, ok := m[s]; ok { return v, nil diff --git a/internal/config/generated.go b/internal/config/generated.go index 7ece2a595..a96bbc687 100644 --- a/internal/config/generated.go +++ b/internal/config/generated.go @@ -74,10 +74,10 @@ func ToLogLevelFromString(s string) (LogLevel, error) { 
func ToDefaultBlockFromString(s string) (DefaultBlock, error) { var m = map[string]DefaultBlock{ - "latest": model.DefaultBlockStatusLatest, - "pending": model.DefaultBlockStatusPending, - "safe": model.DefaultBlockStatusSafe, - "finalized": model.DefaultBlockStatusFinalized, + "latest": model.DefaultBlock_Latest, + "pending": model.DefaultBlock_Pending, + "safe": model.DefaultBlock_Safe, + "finalized": model.DefaultBlock_Finalized, } if v, ok := m[s]; ok { return v, nil @@ -324,6 +324,18 @@ func GetFeatureClaimSubmissionEnabled() bool { return val } +func GetFeatureInputReaderEnabled() bool { + s, ok := os.LookupEnv("CARTESI_FEATURE_INPUT_READER_ENABLED") + if !ok { + s = "true" + } + val, err := toBool(s) + if err != nil { + panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_INPUT_READER_ENABLED: %v", err)) + } + return val +} + func GetFeatureMachineHashCheckEnabled() bool { s, ok := os.LookupEnv("CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED") if !ok { diff --git a/internal/evmreader/claim.go b/internal/evmreader/claim.go index c0d257dcd..1a8e302f9 100644 --- a/internal/evmreader/claim.go +++ b/internal/evmreader/claim.go @@ -6,8 +6,10 @@ package evmreader import ( "cmp" "context" + "strings" . 
"github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -15,7 +17,7 @@ import ( func (r *Service) checkForClaimStatus( ctx context.Context, - apps []application, + apps []appContracts, mostRecentBlockNumber uint64, ) { @@ -61,9 +63,14 @@ func (r *Service) checkForClaimStatus( } } +func getPreviousEpochsWithSubmittedClaims(ctx context.Context, er EvmReaderRepository, appAddress string, block uint64) ([]*Epoch, error) { + f := repository.EpochFilter{Status: Pointer(EpochStatus_ClaimSubmitted), BeforeBlock: Pointer(block)} + return er.ListEpochs(ctx, appAddress, f, repository.Pagination{}) +} + func (r *Service) readAndUpdateClaims( ctx context.Context, - apps []application, + apps []appContracts, lastClaimCheck, mostRecentBlockNumber uint64, ) { @@ -84,6 +91,7 @@ func (r *Service) readAndUpdateClaims( // If there is a key on indexApps, there is at least one // application in the referred application slice consensusContract := apps[0].consensusContract + epochLength := apps[0].application.EpochLength // Retrieve Claim Acceptance Events from blockchain appClaimAcceptanceEventMap, err := r.readClaimsAcceptance( @@ -101,16 +109,16 @@ func (r *Service) readAndUpdateClaims( // Check events against Epochs APP_LOOP: for app, claimAcceptances := range appClaimAcceptanceEventMap { - + appHexAddress := strings.ToLower(app.Hex()) for _, claimAcceptance := range claimAcceptances { // Get Previous Epochs with submitted claims, If is there any, // Application is in an invalid State. 
- previousEpochs, err := r.repository.GetPreviousEpochsWithOpenClaims( - ctx, app, claimAcceptance.LastProcessedBlockNumber.Uint64()) + previousEpochs, err := getPreviousEpochsWithSubmittedClaims( + ctx, r.repository, appHexAddress, claimAcceptance.LastProcessedBlockNumber.Uint64()) if err != nil { r.Logger.Error("Error retrieving previous submitted claims", - "app", app, + "address", app, "block", claimAcceptance.LastProcessedBlockNumber.Uint64(), "error", err) continue APP_LOOP @@ -124,13 +132,13 @@ func (r *Service) readAndUpdateClaims( // Get the Epoch for the current Claim Acceptance Event epoch, err := r.repository.GetEpoch( - ctx, calculateEpochIndex( - r.epochLengthCache[app], + ctx, app.Hex(), calculateEpochIndex( + epochLength, claimAcceptance.LastProcessedBlockNumber.Uint64()), - app) + ) if err != nil { r.Logger.Error("Error retrieving Epoch", - "app", app, + "address", app, "block", claimAcceptance.LastProcessedBlockNumber.Uint64(), "error", err) continue APP_LOOP @@ -138,44 +146,53 @@ func (r *Service) readAndUpdateClaims( // Check Epoch if epoch == nil { - r.Logger.Error( - "Found claim acceptance event for an unknown epoch. Application is in an invalid state", //nolint:lll - "app", app, - "claim last block", claimAcceptance.LastProcessedBlockNumber, - "hash", claimAcceptance.Claim) + if r.inputReaderEnabled { + r.Logger.Error( + "Found claim acceptance event for an unknown epoch. 
Application is in an invalid state", //nolint:lll + "address", app, + "claim last block", claimAcceptance.LastProcessedBlockNumber, + "hash", claimAcceptance.Claim) + } else { + r.Logger.Warn( + "Found claim acceptance event for an epoch that does not exist on the database", + "address", app, + "claim last block", claimAcceptance.LastProcessedBlockNumber, + ) + + } continue APP_LOOP } if epoch.ClaimHash == nil { r.Logger.Warn( "Found claim acceptance event, but claim hasn't been calculated yet", - "app", app, - "lastBlock", claimAcceptance.LastProcessedBlockNumber, + "address", app, + "last_block", claimAcceptance.LastProcessedBlockNumber, ) continue APP_LOOP } if claimAcceptance.Claim != *epoch.ClaimHash || claimAcceptance.LastProcessedBlockNumber.Uint64() != epoch.LastBlock { r.Logger.Error("Accepted Claim does not match actual Claim. Application is in an invalid state", //nolint:lll - "app", app, - "lastBlock", epoch.LastBlock, + "address", app, + "last_block", epoch.LastBlock, "hash", epoch.ClaimHash) continue APP_LOOP } - if epoch.Status == EpochStatusClaimAccepted { + if epoch.Status == EpochStatus_ClaimAccepted { r.Logger.Debug("Claim already accepted. Skipping", - "app", app, - "block", claimAcceptance.LastProcessedBlockNumber.Uint64(), + "address", app, + "last_block", claimAcceptance.LastProcessedBlockNumber.Uint64(), "claimStatus", epoch.Status, "hash", epoch.ClaimHash) continue } - if epoch.Status != EpochStatusClaimSubmitted { + if epoch.Status != EpochStatus_ClaimSubmitted { // this happens when running on latest. EvmReader can see the event before // the claim is marked as submitted by the claimer. r.Logger.Debug("Claim status is not submitted. 
Skipping for now", - "app", app, - "block", claimAcceptance.LastProcessedBlockNumber.Uint64(), + "address", app, + "last_block", claimAcceptance.LastProcessedBlockNumber.Uint64(), "claimStatus", epoch.Status, "hash", epoch.ClaimHash) continue APP_LOOP @@ -183,18 +200,18 @@ func (r *Service) readAndUpdateClaims( // Update Epoch claim status r.Logger.Info("Claim Accepted", - "app", app, - "lastBlock", epoch.LastBlock, + "address", app, + "epoch_index", epoch.Index, + "last_block", epoch.LastBlock, "hash", epoch.ClaimHash, - "epoch_id", epoch.Id, "last_claim_check_block", claimAcceptance.Raw.BlockNumber) - epoch.Status = EpochStatusClaimAccepted + epoch.Status = EpochStatus_ClaimAccepted // Store epoch - err = r.repository.UpdateEpochs( - ctx, app, []*Epoch{epoch}, claimAcceptance.Raw.BlockNumber) + err = r.repository.UpdateEpochsClaimAccepted( + ctx, appHexAddress, []*Epoch{epoch}, claimAcceptance.Raw.BlockNumber) if err != nil { - r.Logger.Error("Error storing claims", "app", app, "error", err) + r.Logger.Error("Error storing claims", "address", app, "error", err) continue } } @@ -232,14 +249,14 @@ func (r *Service) readClaimsAcceptance( // keyByLastClaimCheck is a LastClaimCheck key extractor function intended // to be used with `indexApps` function, see indexApps() -func keyByLastClaimCheck(app application) uint64 { - return app.LastClaimCheckBlock +func keyByLastClaimCheck(app appContracts) uint64 { + return app.application.LastClaimCheckBlock } // keyByIConsensus is a IConsensus address key extractor function intended // to be used with `indexApps` function, see indexApps() -func keyByIConsensus(app application) Address { - return app.IConsensusAddress +func keyByIConsensus(app appContracts) common.Address { + return app.application.IConsensusAddress } // sortByLastBlockNumber is a ClaimAcceptance's by last block number sorting function. 
diff --git a/internal/evmreader/evmreader.go b/internal/evmreader/evmreader.go index 9ce0c5de0..3a50b86ae 100644 --- a/internal/evmreader/evmreader.go +++ b/internal/evmreader/evmreader.go @@ -10,31 +10,34 @@ import ( "math/big" "time" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/jackc/pgx/v5" + "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/model" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/factory" appcontract "github.com/cartesi/rollups-node/pkg/contracts/iapplication" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/cartesi/rollups-node/pkg/contracts/iinputbox" "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" - "github.com/jackc/pgx/v5" ) type CreateInfo struct { service.CreateInfo - model.EvmReaderPersistentConfig + model.NodeConfig[model.NodeConfigValue] PostgresEndpoint config.Redacted[string] BlockchainHttpEndpoint config.Redacted[string] BlockchainWsEndpoint config.Redacted[string] - Database *repository.Database + Repository repository.Repository + EnableInputReader bool MaxRetries uint64 MaxDelay time.Duration MaxStartupTime time.Duration @@ -50,8 +53,8 @@ type Service struct { contractFactory ContractFactory inputBoxDeploymentBlock uint64 defaultBlock DefaultBlock - epochLengthCache map[Address]uint64 hasEnabledApps bool + inputReaderEnabled bool } func (c *CreateInfo) LoadEnv() { @@ 
-63,12 +66,14 @@ func (c *CreateInfo) LoadEnv() { c.LogLevel = service.LogLevel(config.GetLogLevel()) c.LogPretty = config.GetLogPrettyEnabled() c.MaxStartupTime = config.GetMaxStartupTime() + c.EnableInputReader = config.GetFeatureInputReaderEnabled() // persistent - c.DefaultBlock = config.GetEvmReaderDefaultBlock() - c.InputBoxDeploymentBlock = uint64(config.GetContractsInputBoxDeploymentBlockNumber()) - c.InputBoxAddress = common.HexToAddress(config.GetContractsInputBoxAddress()) - c.ChainId = config.GetBlockchainId() + c.Key = BaseConfigKey + c.Value.DefaultBlock = config.GetEvmReaderDefaultBlock() + c.Value.InputBoxDeploymentBlock = uint64(config.GetContractsInputBoxDeploymentBlockNumber()) + c.Value.InputBoxAddress = common.HexToAddress(config.GetContractsInputBoxAddress()).String() + c.Value.ChainID = config.GetBlockchainId() } func Create(c *CreateInfo, s *Service) error { @@ -90,19 +95,19 @@ func Create(c *CreateInfo, s *Service) error { return err } - if c.Database == nil { - c.Database, err = repository.Connect(s.Context, c.PostgresEndpoint.Value, s.Logger) + if c.Repository == nil { + c.Repository, err = factory.NewRepositoryFromConnectionString(s.Context, c.PostgresEndpoint.Value) if err != nil { return err } } - err = s.SetupPersistentConfig(s.Context, c.Database, &c.EvmReaderPersistentConfig) + err = s.SetupNodeConfig(s.Context, c.Repository, &c.NodeConfig) if err != nil { return err } - inputSource, err := NewInputSourceAdapter(common.Address(c.InputBoxAddress), client) + inputSource, err := NewInputSourceAdapter(common.HexToAddress(c.Value.InputBoxAddress), client) if err != nil { return err } @@ -112,11 +117,12 @@ func Create(c *CreateInfo, s *Service) error { s.client = NewEhtClientWithRetryPolicy(client, c.MaxRetries, c.MaxDelay, s.Logger) s.wsClient = NewEthWsClientWithRetryPolicy(wsClient, c.MaxRetries, c.MaxDelay, s.Logger) s.inputSource = NewInputSourceWithRetryPolicy(inputSource, c.MaxRetries, c.MaxDelay, s.Logger) - s.repository = 
c.Database - s.inputBoxDeploymentBlock = c.InputBoxDeploymentBlock - s.defaultBlock = c.DefaultBlock + s.repository = c.Repository + s.inputBoxDeploymentBlock = c.Value.InputBoxDeploymentBlock + s.defaultBlock = c.Value.DefaultBlock s.contractFactory = contractFactory s.hasEnabledApps = true + s.inputReaderEnabled = c.EnableInputReader return nil }) @@ -152,21 +158,22 @@ func (s *Service) String() string { return s.Name } -func (me *Service) SetupPersistentConfig( +func (s *Service) SetupNodeConfig( ctx context.Context, - database *repository.Database, - c *model.EvmReaderPersistentConfig, + r repository.Repository, + c *model.NodeConfig[model.NodeConfigValue], ) error { - err := database.SelectEvmReaderConfig(ctx, c) + _, err := repository.LoadNodeConfig[model.NodeConfigValue](ctx, r, BaseConfigKey) if err == pgx.ErrNoRows { - _, err = database.InsertEvmReaderConfig(ctx, c) + s.Logger.Debug("Initializing node config", "config", c) + err = repository.SaveNodeConfig(ctx, r, c) if err != nil { return err } } else if err == nil { - me.Logger.Warn("Node was already configured. Using previous persistent config", "config", c) + s.Logger.Warn("Node was already configured. Using previous persistent config", "config", c.Value) } else { - me.Logger.Error("Could not retrieve persistent config from Database. %w", "error", err) + s.Logger.Error("Could not retrieve persistent config from Database. 
%w", "error", err) } return err } @@ -175,36 +182,31 @@ func (me *Service) SetupPersistentConfig( type InputSource interface { // Wrapper for FilterInputAdded(), which is automatically generated // by go-ethereum and cannot be used for testing - RetrieveInputs(opts *bind.FilterOpts, appAddresses []Address, index []*big.Int, + RetrieveInputs(opts *bind.FilterOpts, appAddresses []common.Address, index []*big.Int, ) ([]iinputbox.IInputBoxInputAdded, error) } // Interface for the node repository type EvmReaderRepository interface { - StoreEpochAndInputsTransaction( - ctx context.Context, epochInputMap map[*Epoch][]Input, blockNumber uint64, - appAddress Address, - ) (epochIndexIdMap map[uint64]uint64, epochIndexInputIdsMap map[uint64][]uint64, err error) - - GetAllRunningApplications(ctx context.Context) ([]Application, error) - SelectEvmReaderConfig(context.Context, *model.EvmReaderPersistentConfig) error - GetEpoch(ctx context.Context, indexKey uint64, appAddressKey Address) (*Epoch, error) - GetPreviousEpochsWithOpenClaims( - ctx context.Context, - app Address, - lastBlock uint64, - ) ([]*Epoch, error) - UpdateEpochs(ctx context.Context, - app Address, - claims []*Epoch, - mostRecentBlockNumber uint64, - ) error - GetOutput( - ctx context.Context, appAddressKey Address, indexKey uint64, - ) (*Output, error) - UpdateOutputExecutionTransaction( - ctx context.Context, app Address, executedOutputs []*Output, blockNumber uint64, + ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination) ([]*Application, error) + + SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error + LoadNodeConfigRaw(ctx context.Context, key string) (rawJSON []byte, createdAt, updatedAt time.Time, err error) + + // Input monitor + CreateEpochsAndInputs( + ctx context.Context, nameOrAddress string, + epochInputMap map[*Epoch][]*Input, blockNumber uint64, ) error + GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) + 
ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, p repository.Pagination) ([]*Epoch, error) + + // Claim acceptance monitor + UpdateEpochsClaimAccepted(ctx context.Context, nameOrAddress string, epochs []*Epoch, lastClaimCheckBlock uint64) error + + // Output execution monitor + GetOutput(ctx context.Context, nameOrAddress string, indexKey uint64) (*Output, error) + UpdateOutputsExecution(ctx context.Context, nameOrAddress string, executedOutputs []*Output, blockNumber uint64) error } // EthClient mimics part of ethclient.Client functions to narrow down the @@ -223,20 +225,20 @@ type ConsensusContract interface { GetEpochLength(opts *bind.CallOpts) (*big.Int, error) RetrieveClaimAcceptanceEvents( opts *bind.FilterOpts, - appAddresses []Address, + appAddresses []common.Address, ) ([]*iconsensus.IConsensusClaimAcceptance, error) } type ApplicationContract interface { - GetConsensus(opts *bind.CallOpts) (Address, error) + GetConsensus(opts *bind.CallOpts) (common.Address, error) RetrieveOutputExecutionEvents( opts *bind.FilterOpts, ) ([]*appcontract.IApplicationOutputExecuted, error) } type ContractFactory interface { - NewApplication(address Address) (ApplicationContract, error) - NewIConsensus(address Address) (ConsensusContract, error) + NewApplication(address common.Address) (ApplicationContract, error) + NewIConsensus(address common.Address) (ConsensusContract, error) } type SubscriptionError struct { @@ -248,16 +250,13 @@ func (e *SubscriptionError) Error() string { } // Internal struct to hold application and it's contracts together -type application struct { - Application +type appContracts struct { + application *Application applicationContract ApplicationContract consensusContract ConsensusContract } func (r *Service) Run(ctx context.Context, ready chan struct{}) error { - // Initialize epochLength cache - r.epochLengthCache = make(map[Address]uint64) - for { err := r.watchForNewBlocks(ctx, ready) // If the error is a 
SubscriptionError, re run watchForNewBlocks @@ -270,6 +269,11 @@ func (r *Service) Run(ctx context.Context, ready chan struct{}) error { } } +func getAllRunningApplications(ctx context.Context, er EvmReaderRepository) ([]*Application, error) { + f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled)} + return er.ListApplications(ctx, f, repository.Pagination{}) +} + // watchForNewBlocks watches for new blocks and reads new inputs based on the // default block configuration, which have not been processed yet. func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) error { @@ -294,8 +298,7 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) r.Logger.Debug("New block header received", "blockNumber", header.Number, "blockHash", header.Hash()) r.Logger.Debug("Retrieving enabled applications") - // Get All Applications - runningApps, err := r.repository.GetAllRunningApplications(ctx) + runningApps, err := getAllRunningApplications(ctx, r.repository) if err != nil { r.Logger.Error("Error retrieving running applications", "error", @@ -317,16 +320,20 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) r.hasEnabledApps = true // Build Contracts - var apps []application + var apps []appContracts for _, app := range runningApps { applicationContract, consensusContract, err := r.getAppContracts(app) if err != nil { r.Logger.Error("Error retrieving application contracts", "app", app, "error", err) continue } - apps = append(apps, application{Application: app, + aContracts := appContracts{ + application: app, applicationContract: applicationContract, - consensusContract: consensusContract}) + consensusContract: consensusContract, + } + + apps = append(apps, aContracts) } if len(apps) == 0 { @@ -335,7 +342,7 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) } blockNumber := header.Number.Uint64() - if r.defaultBlock != DefaultBlockStatusLatest 
{ + if r.defaultBlock != DefaultBlock_Latest { mostRecentHeader, err := r.fetchMostRecentHeader( ctx, r.defaultBlock, @@ -371,13 +378,13 @@ func (r *Service) fetchMostRecentHeader( var defaultBlockNumber int64 switch defaultBlock { - case DefaultBlockStatusPending: + case DefaultBlock_Pending: defaultBlockNumber = rpc.PendingBlockNumber.Int64() - case DefaultBlockStatusLatest: + case DefaultBlock_Latest: defaultBlockNumber = rpc.LatestBlockNumber.Int64() - case DefaultBlockStatusFinalized: + case DefaultBlock_Finalized: defaultBlockNumber = rpc.FinalizedBlockNumber.Int64() - case DefaultBlockStatusSafe: + case DefaultBlock_Safe: defaultBlockNumber = rpc.SafeBlockNumber.Int64() default: return nil, fmt.Errorf("default block '%v' not supported", defaultBlock) @@ -399,9 +406,13 @@ func (r *Service) fetchMostRecentHeader( // getAppContracts retrieves the ApplicationContract and ConsensusContract for a given Application. // Also validates if IConsensus configuration matches the blockchain registered one -func (r *Service) getAppContracts(app Application, +func (r *Service) getAppContracts(app *Application, ) (ApplicationContract, ConsensusContract, error) { - applicationContract, err := r.contractFactory.NewApplication(app.ContractAddress) + if app == nil { + return nil, nil, fmt.Errorf("Application reference is nil. 
Should never happen") + } + + applicationContract, err := r.contractFactory.NewApplication(app.IApplicationAddress) if err != nil { return nil, nil, errors.Join( fmt.Errorf("error building application contract"), @@ -434,3 +445,18 @@ func (r *Service) getAppContracts(app Application, } return applicationContract, consensus, nil } + +// getEpochLength reads the application epoch length given it's consensus contract +func getEpochLength(consensus ConsensusContract) (uint64, error) { + // FIXME: move to ethutil + + epochLengthRaw, err := consensus.GetEpochLength(nil) + if err != nil { + return 0, errors.Join( + fmt.Errorf("error retrieving application epoch length"), + err, + ) + } + + return epochLengthRaw.Uint64(), nil +} diff --git a/internal/evmreader/input.go b/internal/evmreader/input.go index 1a7bdda46..1dd4b0865 100644 --- a/internal/evmreader/input.go +++ b/internal/evmreader/input.go @@ -5,7 +5,6 @@ package evmreader import ( "context" - "errors" "fmt" . "github.com/cartesi/rollups-node/internal/model" @@ -16,9 +15,12 @@ import ( // checkForNewInputs checks if is there new Inputs for all running Applications func (r *Service) checkForNewInputs( ctx context.Context, - apps []application, + apps []appContracts, mostRecentBlockNumber uint64, ) { + if !r.inputReaderEnabled { + return + } r.Logger.Debug("Checking for new inputs") @@ -79,32 +81,16 @@ func (r *Service) readAndStoreInputs( ctx context.Context, startBlock uint64, endBlock uint64, - apps []application, + apps []appContracts, ) error { - appsToProcess := []common.Address{} - - for _, app := range apps { - - // Get App EpochLength - err := r.addAppEpochLengthIntoCache(app) - if err != nil { - r.Logger.Error("Error adding epoch length into cache", - "app", app.ContractAddress, - "error", err) - continue - } - - appsToProcess = append(appsToProcess, app.ContractAddress) - } - - if len(appsToProcess) == 0 { + if len(apps) == 0 { r.Logger.Warn("No valid running applications") return nil } // Retrieve Inputs 
from blockchain - appInputsMap, err := r.readInputsFromBlockchain(ctx, appsToProcess, startBlock, endBlock) + appInputsMap, err := r.readInputsFromBlockchain(ctx, apps, startBlock, endBlock) if err != nil { return fmt.Errorf("failed to read inputs from block %v to block %v. %w", startBlock, @@ -112,26 +98,35 @@ func (r *Service) readAndStoreInputs( err) } + addrToApp := mapAddressToApp(apps) + // Index Inputs into epochs and handle epoch finalization for address, inputs := range appInputsMap { - epochLength := r.epochLengthCache[address] + app, exists := addrToApp[address] + if !exists { + r.Logger.Error("Application address on input not found", + "address", address) + continue + } + epochLength := app.application.EpochLength // Retrieves last open epoch from DB - currentEpoch, err := r.repository.GetEpoch(ctx, - calculateEpochIndex(epochLength, startBlock), address) + currentEpoch, err := r.repository.GetEpoch(ctx, address.String(), calculateEpochIndex(epochLength, startBlock)) if err != nil { r.Logger.Error("Error retrieving existing current epoch", - "app", address, + "application", app.application.Name, + "address", address, "error", err, ) continue } // Check current epoch status - if currentEpoch != nil && currentEpoch.Status != EpochStatusOpen { + if currentEpoch != nil && currentEpoch.Status != EpochStatus_Open { r.Logger.Error("Current epoch is not open", - "app", address, + "application", app.application.Name, + "address", address, "epoch_index", currentEpoch.Index, "status", currentEpoch.Status, ) @@ -139,7 +134,7 @@ func (r *Service) readAndStoreInputs( } // Initialize epochs inputs map - var epochInputMap = make(map[*Epoch][]Input) + var epochInputMap = make(map[*Epoch][]*Input) // Index Inputs into epochs for _, input := range inputs { @@ -148,12 +143,17 @@ func (r *Service) readAndStoreInputs( // If input belongs into a new epoch, close the previous known one if currentEpoch != nil && currentEpoch.Index != inputEpochIndex { - currentEpoch.Status = 
EpochStatusClosed + currentEpoch.Status = EpochStatus_Closed r.Logger.Info("Closing epoch", - "app", currentEpoch.AppAddress, + "application", app.application.Name, + "address", address, "epoch_index", currentEpoch.Index, "start", currentEpoch.FirstBlock, "end", currentEpoch.LastBlock) + _, ok := epochInputMap[currentEpoch] + if !ok { + epochInputMap[currentEpoch] = []*Input{} + } currentEpoch = nil } if currentEpoch == nil { @@ -161,50 +161,52 @@ func (r *Service) readAndStoreInputs( Index: inputEpochIndex, FirstBlock: inputEpochIndex * epochLength, LastBlock: (inputEpochIndex * epochLength) + epochLength - 1, - Status: EpochStatusOpen, - AppAddress: address, + Status: EpochStatus_Open, } - epochInputMap[currentEpoch] = []Input{} + epochInputMap[currentEpoch] = []*Input{} } r.Logger.Info("Found new Input", - "app", address, + "application", app.application.Name, + "address", address, "index", input.Index, "block", input.BlockNumber, "epoch_index", inputEpochIndex) currentInputs, ok := epochInputMap[currentEpoch] if !ok { - currentInputs = []Input{} + currentInputs = []*Input{} } - epochInputMap[currentEpoch] = append(currentInputs, *input) + epochInputMap[currentEpoch] = append(currentInputs, input) } // Indexed all inputs. 
Check if it is time to close this epoch if currentEpoch != nil && endBlock >= currentEpoch.LastBlock { - currentEpoch.Status = EpochStatusClosed + currentEpoch.Status = EpochStatus_Closed r.Logger.Info("Closing epoch", - "app", currentEpoch.AppAddress, + "application", app.application.Name, + "address", address, "epoch_index", currentEpoch.Index, "start", currentEpoch.FirstBlock, "end", currentEpoch.LastBlock) // Add to inputMap so it is stored _, ok := epochInputMap[currentEpoch] if !ok { - epochInputMap[currentEpoch] = []Input{} + epochInputMap[currentEpoch] = []*Input{} } } - _, _, err = r.repository.StoreEpochAndInputsTransaction( + err = r.repository.CreateEpochsAndInputs( ctx, + address.String(), epochInputMap, endBlock, - address, ) if err != nil { r.Logger.Error("Error storing inputs and epochs", - "app", address, + "application", app.application.Name, + "address", address, "error", err, ) continue @@ -212,9 +214,9 @@ func (r *Service) readAndStoreInputs( // Store everything if len(epochInputMap) > 0 { - r.Logger.Debug("Inputs and epochs stored successfully", - "app", address, + "application", app.application.Name, + "address", address, "start-block", startBlock, "end-block", endBlock, "total epochs", len(epochInputMap), @@ -229,44 +231,19 @@ func (r *Service) readAndStoreInputs( return nil } -// addAppEpochLengthIntoCache checks the epoch length cache and read epoch length from IConsensus -// contract and add it to the cache if needed -func (r *Service) addAppEpochLengthIntoCache(app application) error { - - epochLength, ok := r.epochLengthCache[app.ContractAddress] - if !ok { - - epochLength, err := getEpochLength(app.consensusContract) - if err != nil { - return errors.Join( - fmt.Errorf("error retrieving epoch length from contracts for app %s", - app.ContractAddress), - err) - } - r.epochLengthCache[app.ContractAddress] = epochLength - r.Logger.Info("Got epoch length from IConsensus", - "app", app.ContractAddress, - "epoch length", epochLength) - } else 
{ - r.Logger.Debug("Got epoch length from cache", - "app", app.ContractAddress, - "epoch length", epochLength) - } - - return nil -} - // readInputsFromBlockchain read the inputs from the blockchain ordered by Input index func (r *Service) readInputsFromBlockchain( ctx context.Context, - appsAddresses []Address, + apps []appContracts, startBlock, endBlock uint64, -) (map[Address][]*Input, error) { +) (map[common.Address][]*Input, error) { // Initialize app input map - var appInputsMap = make(map[Address][]*Input) - for _, appsAddress := range appsAddresses { - appInputsMap[appsAddress] = []*Input{} + var appInputsMap = make(map[common.Address][]*Input) + var appsAddresses = []common.Address{} + for _, app := range apps { + appInputsMap[app.application.IApplicationAddress] = []*Input{} + appsAddresses = append(appsAddresses, app.application.IApplicationAddress) } opts := bind.FilterOpts{ @@ -282,15 +259,15 @@ func (r *Service) readInputsFromBlockchain( // Order inputs as order is not enforced by RetrieveInputs method nor the APIs for _, event := range inputsEvents { r.Logger.Debug("Received input", - "app", event.AppContract, + "address", event.AppContract, "index", event.Index, "block", event.Raw.BlockNumber) input := &Input{ - Index: event.Index.Uint64(), - CompletionStatus: InputStatusNone, - RawData: event.Input, - BlockNumber: event.Raw.BlockNumber, - AppAddress: event.AppContract, + Index: event.Index.Uint64(), + Status: InputCompletionStatus_None, + RawData: event.Input, + BlockNumber: event.Raw.BlockNumber, + TransactionReference: common.BigToHash(event.Index), } // Insert Sorted @@ -301,20 +278,6 @@ func (r *Service) readInputsFromBlockchain( } // byLastProcessedBlock key extractor function intended to be used with `indexApps` function -func byLastProcessedBlock(app application) uint64 { - return app.LastProcessedBlock -} - -// getEpochLength reads the application epoch length given it's consensus contract -func getEpochLength(consensus ConsensusContract) 
(uint64, error) { - - epochLengthRaw, err := consensus.GetEpochLength(nil) - if err != nil { - return 0, errors.Join( - fmt.Errorf("error retrieving application epoch length"), - err, - ) - } - - return epochLengthRaw.Uint64(), nil +func byLastProcessedBlock(app appContracts) uint64 { + return app.application.LastProcessedBlock } diff --git a/internal/evmreader/output.go b/internal/evmreader/output.go index 334717a22..a2f7cba2b 100644 --- a/internal/evmreader/output.go +++ b/internal/evmreader/output.go @@ -13,7 +13,7 @@ import ( func (r *Service) checkForOutputExecution( ctx context.Context, - apps []application, + apps []appContracts, mostRecentBlockNumber uint64, ) { @@ -23,7 +23,7 @@ func (r *Service) checkForOutputExecution( for _, app := range apps { - LastOutputCheck := app.LastOutputCheckBlock + LastOutputCheck := app.application.LastOutputCheckBlock // Safeguard: Only check blocks starting from the block where the InputBox // contract was deployed as Inputs can be added to that same block @@ -34,7 +34,7 @@ func (r *Service) checkForOutputExecution( if mostRecentBlockNumber > LastOutputCheck { r.Logger.Debug("Checking output execution for application", - "app", app.ContractAddress, + "application", app.application.Name, "address", app.application.IApplicationAddress, "last output check block", LastOutputCheck, "most recent block", mostRecentBlockNumber) @@ -42,14 +42,14 @@ func (r *Service) checkForOutputExecution( } else if mostRecentBlockNumber < LastOutputCheck { r.Logger.Warn( - "Not reading output execution: most recent block is lower than the last processed one", //nolint:lll - "app", app.ContractAddress, + "Not reading output execution: most recent block is lower than the last processed one", + "application", app.application.Name, "address", app.application.IApplicationAddress, "last output check block", LastOutputCheck, "most recent block", mostRecentBlockNumber, ) } else { r.Logger.Warn("Not reading output execution: already checked the most recent 
blocks", - "app", app.ContractAddress, + "application", app.application.Name, "address", app.application.IApplicationAddress, "last output check block", LastOutputCheck, "most recent block", mostRecentBlockNumber, ) @@ -59,7 +59,7 @@ func (r *Service) checkForOutputExecution( } func (r *Service) readAndUpdateOutputs( - ctx context.Context, app application, lastOutputCheck, mostRecentBlockNumber uint64) { + ctx context.Context, app appContracts, lastOutputCheck, mostRecentBlockNumber uint64) { contract := app.applicationContract @@ -70,7 +70,9 @@ func (r *Service) readAndUpdateOutputs( outputExecutedEvents, err := contract.RetrieveOutputExecutionEvents(opts) if err != nil { - r.Logger.Error("Error reading output events", "app", app.ContractAddress, "error", err) + r.Logger.Error("Error reading output events", + "application", app.application.Name, "address", app.application.IApplicationAddress, + "error", err) return } @@ -79,40 +81,49 @@ func (r *Service) readAndUpdateOutputs( for _, event := range outputExecutedEvents { // Compare output to check it is the correct one - output, err := r.repository.GetOutput(ctx, app.ContractAddress, event.OutputIndex) + output, err := r.repository.GetOutput(ctx, app.application.IApplicationAddress.Hex(), event.OutputIndex) if err != nil { r.Logger.Error("Error retrieving output", - "app", app.ContractAddress, "index", event.OutputIndex, "error", err) + "application", app.application.Name, "address", app.application.IApplicationAddress, + "index", event.OutputIndex, + "error", err) return } if output == nil { r.Logger.Warn("Found OutputExecuted event but output does not exist in the database yet", - "app", app.ContractAddress, "index", event.OutputIndex) + "application", app.application.Name, "address", app.application.IApplicationAddress, + "index", event.OutputIndex) return } if !bytes.Equal(output.RawData, event.Output) { r.Logger.Debug("Output mismatch", - "app", app.ContractAddress, "index", event.OutputIndex, - "actual", 
output.RawData, "event's", event.Output) + "application", app.application.Name, "address", app.application.IApplicationAddress, + "index", event.OutputIndex, + "actual", output.RawData, + "event's", event.Output) r.Logger.Error("Output mismatch. Application is in an invalid state", - "app", app.ContractAddress, + "application", app.application.Name, "address", app.application.IApplicationAddress, "index", event.OutputIndex) return } - r.Logger.Info("Output executed", "app", app.ContractAddress, "index", event.OutputIndex) - output.TransactionHash = &event.Raw.TxHash + r.Logger.Info("Output executed", + "application", app.application.Name, "address", app.application.IApplicationAddress, + "index", event.OutputIndex) + output.ExecutionTransactionHash = &event.Raw.TxHash executedOutputs = append(executedOutputs, output) } - err = r.repository.UpdateOutputExecutionTransaction( - ctx, app.ContractAddress, executedOutputs, mostRecentBlockNumber) + err = r.repository.UpdateOutputsExecution( + ctx, app.application.IApplicationAddress.Hex(), executedOutputs, mostRecentBlockNumber) if err != nil { - r.Logger.Error("Error storing output execution statuses", "app", app, "error", err) + r.Logger.Error("Error storing output execution statuses", + "application", app.application.Name, "address", app.application.IApplicationAddress, + "error", err) } } diff --git a/internal/evmreader/util.go b/internal/evmreader/util.go index a884692e3..3e6192f2b 100644 --- a/internal/evmreader/util.go +++ b/internal/evmreader/util.go @@ -8,6 +8,7 @@ import ( "slices" . 
"github.com/cartesi/rollups-node/internal/model" + "github.com/ethereum/go-ethereum/common" ) // calculateEpochIndex calculates the epoch index given the input block number @@ -17,14 +18,22 @@ func calculateEpochIndex(epochLength uint64, blockNumber uint64) uint64 { } // appsToAddresses -func appsToAddresses(apps []application) []Address { - var addresses []Address +func appsToAddresses(apps []appContracts) []common.Address { + var addresses []common.Address for _, app := range apps { - addresses = append(addresses, app.ContractAddress) + addresses = append(addresses, app.application.IApplicationAddress) } return addresses } +func mapAddressToApp(apps []appContracts) map[common.Address]appContracts { + result := make(map[common.Address]appContracts) + for _, app := range apps { + result[app.application.IApplicationAddress] = app + } + return result +} + // sortByInputIndex is a compare function that orders Inputs // by index field. It is intended to be used with `insertSorted`, see insertSorted() func sortByInputIndex(a, b *Input) int { @@ -44,11 +53,11 @@ func insertSorted[T any](compare func(a, b *T) int, slice []*T, item *T) []*T { // Index applications given a key extractor function func indexApps[K comparable]( - keyExtractor func(application) K, - apps []application, -) map[K][]application { + keyExtractor func(appContracts) K, + apps []appContracts, +) map[K][]appContracts { - result := make(map[K][]application) + result := make(map[K][]appContracts) for _, item := range apps { key := keyExtractor(item) result[key] = append(result[key], item) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index f9f85b6fa..1efce89a9 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -11,13 +11,12 @@ import ( "io" "log/slog" "net/http" - "net/url" "time" "github.com/cartesi/rollups-node/internal/advancer/machines" . 
"github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/nodemachine" - "github.com/ethereum/go-ethereum/common" + "github.com/cartesi/rollups-node/internal/services" + "github.com/cartesi/rollups-node/pkg/service" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -26,10 +25,23 @@ var ( ErrNoApp = errors.New("no machine for application") ) +type IInspectMachines interface { + GetInspectMachine(appId int64) (machines.InspectMachine, bool) +} + +type IInspectMachine interface { + Inspect(_ context.Context, query []byte) (*InspectResult, error) +} + +type InspectRepository interface { + GetApplication(ctx context.Context, nameOrAddress string) (*Application, error) +} + type Inspector struct { IInspectMachines - Logger *slog.Logger - ServeMux *http.ServeMux + repository InspectRepository + Logger *slog.Logger + ServeMux *http.ServeMux } type ReportResponse struct { @@ -43,26 +55,41 @@ type InspectResponse struct { ProcessedInputs uint64 `json:"processed_input_count"` } -func (s *Inspector) CreateInspectServer( - addr string, - maxRetries int, - retryInterval time.Duration, - mux *http.ServeMux, -) (*http.Server, func() error) { +func NewInspector( + repo InspectRepository, + machines IInspectMachines, + address string, + logLevel service.LogLevel, + logPretty bool, +) (*Inspector, *http.Server, func() error) { + logger := service.NewLogger(slog.Level(logLevel), logPretty) + logger = logger.With("service", "inspect") + inspector := &Inspector{ + IInspectMachines: machines, + repository: repo, + Logger: logger, + ServeMux: http.NewServeMux(), + } + + inspector.ServeMux.Handle("/inspect/{dapp}", services.CorsMiddleware(http.Handler(inspector))) + server := &http.Server{ - Addr: addr, - Handler: mux, - ErrorLog: slog.NewLogLogger(s.Logger.Handler(), slog.LevelError), + Addr: address, + Handler: inspector.ServeMux, + ErrorLog: slog.NewLogLogger(inspector.Logger.Handler(), slog.LevelError), } - return server, func() error { - 
s.Logger.Info("Create Inspect Server", "addr", addr) + + return inspector, server, func() error { + maxRetries := 3 // FIXME: should go to config + retryInterval := 5 * time.Second // FIXME: should go to config + inspector.Logger.Info("Create Inspect Server", "address", address) var err error = nil - for retry := 0; retry < maxRetries+1; retry++ { + for retry := 0; retry <= maxRetries; retry++ { switch err = server.ListenAndServe(); err { case http.ErrServerClosed: return nil default: - s.Logger.Error("http", + inspector.Logger.Error("http", "error", err, "try", retry+1, "maxRetries", maxRetries, @@ -76,7 +103,7 @@ func (s *Inspector) CreateInspectServer( func (inspect *Inspector) ServeHTTP(w http.ResponseWriter, r *http.Request) { var ( - dapp Address + dapp string payload []byte err error reports []ReportResponse @@ -91,33 +118,21 @@ func (inspect *Inspector) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - dapp = common.HexToAddress(r.PathValue("dapp")) + dapp = r.PathValue("dapp") if r.Method == "POST" { payload, err = io.ReadAll(r.Body) if err != nil { - inspect.Logger.Info("Bad request", - "err", err) + inspect.Logger.Info("Bad request", "err", err) http.Error(w, err.Error(), http.StatusBadRequest) return } } else { - if r.PathValue("payload") == "" { - inspect.Logger.Info("Bad request", - "err", "Missing payload") - http.Error(w, "Missing payload", http.StatusBadRequest) - return - } - decodedValue, err := url.PathUnescape(r.PathValue("payload")) - if err != nil { - inspect.Logger.Error("Internal server error", - "err", err) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - payload = []byte(decodedValue) + inspect.Logger.Info("HTTP method not supported", "app", dapp) + http.Error(w, "HTTP method not supported", http.StatusNotFound) + return } - inspect.Logger.Info("Got new inspect request", "application", dapp.String()) + inspect.Logger.Info("Got new inspect request", "app", dapp) result, err := inspect.process(r.Context(), 
dapp, payload) if err != nil { if errors.Is(err, ErrNoApp) { @@ -162,18 +177,26 @@ func (inspect *Inspector) ServeHTTP(w http.ResponseWriter, r *http.Request) { } inspect.Logger.Info("Request executed", "status", status, - "application", dapp.String()) + "application", dapp) } // process sends an inspect request to the machine func (inspect *Inspector) process( ctx context.Context, - app Address, - query []byte) (*nodemachine.InspectResult, error) { + nameOrAddress string, + query []byte) (*InspectResult, error) { + + app, err := inspect.repository.GetApplication(ctx, nameOrAddress) + if app == nil { + if err != nil { + return nil, fmt.Errorf("%w %s", err, nameOrAddress) + } + return nil, fmt.Errorf("%w %s", ErrNoApp, nameOrAddress) + } // Asserts that the app has an associated machine. - machine, exists := inspect.GetInspectMachine(app) + machine, exists := inspect.GetInspectMachine(app.ID) if !exists { - return nil, fmt.Errorf("%w %s", ErrNoApp, app.String()) + return nil, fmt.Errorf("%w %s", ErrNoApp, nameOrAddress) } res, err := machine.Inspect(ctx, query) @@ -183,13 +206,3 @@ func (inspect *Inspector) process( return res, nil } - -// ------------------------------------------------------------------------------------------------ - -type IInspectMachines interface { - GetInspectMachine(app Address) (machines.InspectMachine, bool) -} - -type IInspectMachine interface { - Inspect(_ context.Context, query []byte) (*nodemachine.InspectResult, error) -} diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 7e05c3180..a2d5c701d 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -9,20 +9,18 @@ import ( crand "crypto/rand" "encoding/json" "fmt" - "io" "log/slog" "net/http" - "net/url" - "strings" "testing" "time" "github.com/cartesi/rollups-node/internal/advancer/machines" . 
"github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/nodemachine" "github.com/cartesi/rollups-node/internal/services" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/suite" ) @@ -47,14 +45,14 @@ func (s *InspectSuite) SetupTest() { s.ServiceAddr = fmt.Sprintf("127.0.0.1:%v", s.ServicePort) } -func (s *InspectSuite) TestGetOk() { +func (s *InspectSuite) TestPostOk() { inspect, app, payload := s.setup() ctx, cancel := context.WithCancel(context.Background()) defer cancel() router := http.NewServeMux() - router.Handle("/inspect/{dapp}/{payload}", inspect) + router.Handle("/inspect/{dapp}", inspect) httpService := services.HttpService{Name: "http", Address: s.ServiceAddr, Handler: router} result := make(chan error, 1) @@ -69,24 +67,23 @@ func (s *InspectSuite) TestGetOk() { s.FailNow("timed out waiting for HttpService to be ready") } - resp, err := http.Get(fmt.Sprintf("http://%v/inspect/%v/%v", - s.ServiceAddr, - app.Hex(), - url.PathEscape(string(payload.Bytes())))) + resp, err := http.Post(fmt.Sprintf("http://%v/inspect/%v", s.ServiceAddr, app.IApplicationAddress.Hex()), + "application/octet-stream", + bytes.NewBuffer(payload.Bytes())) if err != nil { s.FailNow(err.Error()) } s.assertResponse(resp, payload.Hex()) } -func (s *InspectSuite) TestGetInvalidPayload() { - inspect, _, _ := s.setup() +func (s *InspectSuite) TestPostWithNameOk() { + inspect, app, payload := s.setup() ctx, cancel := context.WithCancel(context.Background()) defer cancel() router := http.NewServeMux() - router.Handle("/inspect/{dapp}/{payload}", inspect) + router.Handle("/inspect/{dapp}", inspect) httpService := services.HttpService{Name: "http", Address: s.ServiceAddr, Handler: router} result := make(chan error, 1) @@ -101,19 +98,17 @@ func (s *InspectSuite) TestGetInvalidPayload() { s.FailNow("timed out waiting for HttpService to be ready") } - resp, err := 
http.Get(fmt.Sprintf("http://%v/inspect/%v/test", - s.ServiceAddr, - "0x34416D44EffB07Ac0C31DB485733Aee0b5708F54", - )) - s.Require().Nil(err) - s.Equal(http.StatusNotFound, resp.StatusCode) - buf := new(strings.Builder) - io.Copy(buf, resp.Body) //nolint: errcheck - s.Require().Contains(buf.String(), "Application not found") + resp, err := http.Post(fmt.Sprintf("http://%s/inspect/%s", s.ServiceAddr, app.Name), + "application/octet-stream", + bytes.NewBuffer(payload.Bytes())) + if err != nil { + s.FailNow(err.Error()) + } + s.assertResponse(resp, payload.Hex()) } -func (s *InspectSuite) TestPostOk() { - inspect, app, payload := s.setup() +func (s *InspectSuite) TestPostNoApp() { + inspect, _, payload := s.setup() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -134,27 +129,35 @@ func (s *InspectSuite) TestPostOk() { s.FailNow("timed out waiting for HttpService to be ready") } - resp, err := http.Post(fmt.Sprintf("http://%v/inspect/%v", s.ServiceAddr, app.Hex()), + resp, err := http.Post(fmt.Sprintf("http://%s/inspect/%s", s.ServiceAddr, "Aloha"), "application/octet-stream", bytes.NewBuffer(payload.Bytes())) - if err != nil { - s.FailNow(err.Error()) - } - s.assertResponse(resp, payload.Hex()) + s.Require().Nil(err) + s.Equal(http.StatusNotFound, resp.StatusCode) + + resp, err = http.Post(fmt.Sprintf("http://%s/inspect/%s", s.ServiceAddr, + "0x1000000000000000000000000000000000000000"), + "application/octet-stream", + bytes.NewBuffer(payload.Bytes())) + s.Require().Nil(err) + s.Equal(http.StatusNotFound, resp.StatusCode) } -// Note: add more tests +// FIXME: add more tests -func (s *InspectSuite) setup() (*Inspector, Address, Hash) { - app := randomAddress() +func (s *InspectSuite) setup() (*Inspector, *Application, common.Hash) { + m := newMockMachine(1) + repo := newMockRepository() + repo.apps = append(repo.apps, m.Application) machines := newMockMachines() - machines.Map[app] = &MockMachine{} + machines.Map[1] = *m inspect := 
&Inspector{ + repository: repo, IInspectMachines: machines, Logger: service.NewLogger(slog.LevelDebug, true), } payload := randomHash() - return inspect, app, payload + return inspect, m.Application, payload } func (s *InspectSuite) assertResponse(resp *http.Response, payload string) { @@ -173,29 +176,31 @@ func (s *InspectSuite) assertResponse(resp *http.Response, payload string) { // ------------------------------------------------------------------------------------------------ type MachinesMock struct { - Map map[Address]machines.InspectMachine + Map map[int64]MockMachine } func newMockMachines() *MachinesMock { return &MachinesMock{ - Map: map[Address]machines.InspectMachine{}, + Map: map[int64]MockMachine{}, } } -func (mock *MachinesMock) GetInspectMachine(app Address) (machines.InspectMachine, bool) { - machine, exists := mock.Map[app] - return machine, exists +func (mock *MachinesMock) GetInspectMachine(appId int64) (machines.InspectMachine, bool) { + machine, exists := mock.Map[appId] + return &machine, exists } // ------------------------------------------------------------------------------------------------ -type MockMachine struct{} +type MockMachine struct { + Application *Application +} func (mock *MockMachine) Inspect( _ context.Context, query []byte, -) (*nodemachine.InspectResult, error) { - var res nodemachine.InspectResult +) (*InspectResult, error) { + var res InspectResult var reports [][]byte reports = append(reports, query) @@ -207,22 +212,51 @@ func (mock *MockMachine) Inspect( return &res, nil } +func newMockMachine(id int64) *MockMachine { + return &MockMachine{ + Application: &Application{ + ID: id, + IApplicationAddress: randomAddress(), + Name: fmt.Sprintf("app-%v", id), + }, + } +} + +// ------------------------------------------------------------------------------------------------ + +type MockRepository struct { + apps []*Application +} + +func (mock *MockRepository) GetApplication(ctx context.Context, nameOrAddress string) 
(*Application, error) { + for _, app := range mock.apps { + if app.Name == nameOrAddress || app.IApplicationAddress == common.HexToAddress(nameOrAddress) { + return app, nil + } + } + return nil, nil +} + +func newMockRepository() *MockRepository { + return &MockRepository{apps: []*Application{}} +} + // ------------------------------------------------------------------------------------------------ -func randomAddress() Address { +func randomAddress() common.Address { address := make([]byte, 20) _, err := crand.Read(address) if err != nil { panic(err) } - return Address(address) + return common.Address(address) } -func randomHash() Hash { +func randomHash() common.Hash { hash := make([]byte, 32) _, err := crand.Read(hash) if err != nil { panic(err) } - return Hash(hash) + return common.Hash(hash) } diff --git a/internal/merkle/proof.go b/internal/merkle/proof.go index d867a4d9a..51431691d 100644 --- a/internal/merkle/proof.go +++ b/internal/merkle/proof.go @@ -6,7 +6,7 @@ package merkle import ( "fmt" - "github.com/cartesi/rollups-node/internal/model" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -16,12 +16,12 @@ import ( // // If the number of leaves exceeds the capacity for the given height, // an error is returned. 
-func CreateProofs(leaves []model.Hash, height uint) (model.Hash, []model.Hash, error) { - pristineNode := model.Hash{} +func CreateProofs(leaves []common.Hash, height uint) (common.Hash, []common.Hash, error) { + pristineNode := common.Hash{} currentLevel := leaves leafCount := uint(len(leaves)) - siblings := make([]model.Hash, leafCount*height) + siblings := make([]common.Hash, leafCount*height) // for each level in the tree, starting from the leaves for levelIdx := uint(0); levelIdx < height; levelIdx++ { @@ -47,7 +47,7 @@ func CreateProofs(leaves []model.Hash, height uint) (model.Hash, []model.Hash, e // in the end, current level is the root level if len(currentLevel) > 1 { err := fmt.Errorf("too many leaves [%d] for height [%d]", leafCount, height) - return model.Hash{}, nil, err + return common.Hash{}, nil, err } return *at(currentLevel, 0, &pristineNode), siblings, nil @@ -62,7 +62,7 @@ func CreateProofs(leaves []model.Hash, height uint) (model.Hash, []model.Hash, e // The parent nodes are stored in the first half of the original level slice. // // The function returns the parent level by re-slicing the original level slice. -func parentLevel(level []model.Hash, pristineNode *model.Hash) []model.Hash { +func parentLevel(level []common.Hash, pristineNode *common.Hash) []common.Hash { // for each pair of nodes in level for idx := 0; idx < len(level); idx += 2 { leftChild := level[idx][:] @@ -75,7 +75,7 @@ func parentLevel(level []model.Hash, pristineNode *model.Hash) []model.Hash { } // at returns a pointer to the item located at index or the default value. 
-func at(array []model.Hash, index uint, defaultValue *model.Hash) *model.Hash { +func at(array []common.Hash, index uint, defaultValue *common.Hash) *common.Hash { if index < uint(len(array)) { return &array[index] } else { diff --git a/internal/merkle/proof_test.go b/internal/merkle/proof_test.go index 8b6e94af6..76a107ba6 100644 --- a/internal/merkle/proof_test.go +++ b/internal/merkle/proof_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/cartesi/rollups-node/internal/model" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/suite" @@ -16,7 +15,7 @@ import ( type CreateProofsSuite struct { suite.Suite - pristine []model.Hash + pristine []common.Hash } func TestCreateProofsSuite(t *testing.T) { @@ -25,7 +24,7 @@ func TestCreateProofsSuite(t *testing.T) { func (s *CreateProofsSuite) SetupSuite() { maxHeight := 4 - s.pristine = make([]model.Hash, maxHeight) + s.pristine = make([]common.Hash, maxHeight) for height := 1; height < maxHeight; height++ { s.pristine[height] = crypto.Keccak256Hash( @@ -48,7 +47,7 @@ func (s *CreateProofsSuite) TestZeroHeight() { s.Run("one leaf", func() { leaf := crypto.Keccak256Hash([]byte("Cartesi")) - root, siblings, err := CreateProofs([]model.Hash{leaf}, 0) + root, siblings, err := CreateProofs([]common.Hash{leaf}, 0) s.Require().Nil(err) s.Equal(leaf, root) @@ -56,7 +55,7 @@ func (s *CreateProofsSuite) TestZeroHeight() { }) s.Run("two leaves", func() { - leaves := make([]model.Hash, 2) + leaves := make([]common.Hash, 2) _, _, err := CreateProofs(leaves, 0) s.Require().NotNil(err) @@ -79,7 +78,7 @@ func (s *CreateProofsSuite) TestHeightOne() { }) s.Run("one leaf", func() { - leaves := []model.Hash{leaf1} + leaves := []common.Hash{leaf1} root, siblings, err := CreateProofs(leaves, uint(height)) s.Require().Nil(err) @@ -97,8 +96,8 @@ func (s *CreateProofsSuite) TestHeightOne() { }) s.Run("two leaves", func() { - leaves := []model.Hash{leaf1, leaf2} - leavesCopy 
:= make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -120,7 +119,7 @@ func (s *CreateProofsSuite) TestHeightOne() { }) s.Run("three leaves", func() { - leaves := make([]model.Hash, 3) + leaves := make([]common.Hash, 3) _, _, err := CreateProofs(leaves, 1) s.Require().NotNil(err) @@ -146,7 +145,7 @@ func (s *CreateProofsSuite) TestHeightTwo() { }) s.Run("one leaf", func() { - leaves := []model.Hash{leaf1} + leaves := []common.Hash{leaf1} root, siblings, err := CreateProofs(leaves, uint(height)) s.Require().Nil(err) @@ -167,8 +166,8 @@ func (s *CreateProofsSuite) TestHeightTwo() { }) s.Run("two leaves", func() { - leaves := []model.Hash{leaf1, leaf2} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -195,8 +194,8 @@ func (s *CreateProofsSuite) TestHeightTwo() { }) s.Run("three leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -225,8 +224,8 @@ func (s *CreateProofsSuite) TestHeightTwo() { }) s.Run("four leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -257,7 +256,7 @@ func (s *CreateProofsSuite) TestHeightTwo() { }) s.Run("five leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4, leaf1} + leaves := 
[]common.Hash{leaf1, leaf2, leaf3, leaf4, leaf1} _, _, err := CreateProofs(leaves, uint(height)) s.Require().NotNil(err) @@ -287,7 +286,7 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("one leaf", func() { - leaves := []model.Hash{leaf1} + leaves := []common.Hash{leaf1} root, siblings, err := CreateProofs(leaves, uint(height)) s.Require().Nil(err) @@ -313,8 +312,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("two leaves", func() { - leaves := []model.Hash{leaf1, leaf2} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -346,8 +345,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("three leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -382,8 +381,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("four leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -421,8 +420,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("five leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4, leaf5} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4, leaf5} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -496,8 +495,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("six leaves", 
func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -580,8 +579,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("seven leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6, leaf7} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6, leaf7} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -673,8 +672,8 @@ func (s *CreateProofsSuite) TestHeightThree() { }) s.Run("eight leaves", func() { - leaves := []model.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6, leaf7, leaf8} - leavesCopy := make([]model.Hash, len(leaves)) + leaves := []common.Hash{leaf1, leaf2, leaf3, leaf4, leaf5, leaf6, leaf7, leaf8} + leavesCopy := make([]common.Hash, len(leaves)) copy(leavesCopy, leaves) root, siblings, err := CreateProofs(leavesCopy, uint(height)) @@ -778,7 +777,7 @@ func (s *CreateProofsSuite) TestHeightThree() { // This test was taken from the libcmt suite as a method to compare // both implementations func (s *CreateProofsSuite) TestItMatchesMachineImplementation() { - leaves := []model.Hash{ + leaves := []common.Hash{ crypto.Keccak256Hash([]byte("Cartesi")), crypto.Keccak256Hash([]byte("Merkle")), crypto.Keccak256Hash([]byte("Tree")), @@ -800,7 +799,7 @@ func FuzzVerifyProofs(f *testing.F) { leafCount = bound(leafCount, 1, 1< Postgres +// - "sqlite://some/path.db" => SQLite +// +// Then it initializes the repo, runs migrations, and returns it. 
+func NewRepositoryFromConnectionString(ctx context.Context, conn string) (Repository, error) { + lowerConn := strings.ToLower(conn) + switch { + case strings.HasPrefix(lowerConn, "postgres://"): + return newPostgresRepository(ctx, conn) + // case strings.HasPrefix(lowerConn, "sqlite://"): + // return newSQLiteRepository(ctx, conn) + default: + return nil, fmt.Errorf("unrecognized connection string format: %s", conn) + } +} + +func newPostgresRepository(ctx context.Context, conn string) (Repository, error) { + pgRepo, err := postgres.NewPostgresRepository(ctx, conn, 5, 3*time.Second) // FIXME: get from config + if err != nil { + return nil, err + } + + return pgRepo, nil +} + +// func newSQLiteRepository(ctx context.Context, conn string) (Repository, error) { +// // Typically parse out the file from the "sqlite://somefile.db" connection string, +// // open database, etc. +// sqliteRepo, err := sqlite.NewSQLiteRepository(ctx, conn) +// if err != nil { +// return nil, err +// } +// +// // run migrations for SQLite, if applicable +// if err := migration.EnsureMigrationsSQLite(ctx, conn); err != nil { +// sqliteRepo.Close() +// return nil, err +// } +// +// return sqliteRepo, nil +// } diff --git a/internal/repository/machine.go b/internal/repository/machine.go deleted file mode 100644 index d55387b03..000000000 --- a/internal/repository/machine.go +++ /dev/null @@ -1,333 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -package repository - -import ( - "context" - "errors" - "fmt" - "strings" - - . 
"github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/nodemachine" - - "github.com/jackc/pgx/v5" -) - -var ErrAdvancerRepository = errors.New("advancer repository error") - -func (repo *Database) GetMachineConfigurations( - ctx context.Context, -) ([]*MachineConfig, error) { - // Query string to fetch application and execution parameters for "running" applications - query := ` - SELECT - a.contract_address, - a.template_uri, - e.advance_inc_cycles, - e.advance_max_cycles, - e.inspect_inc_cycles, - e.inspect_max_cycles, - e.advance_inc_deadline, - e.advance_max_deadline, - e.inspect_inc_deadline, - e.inspect_max_deadline, - e.load_deadline, - e.store_deadline, - e.fast_deadline, - e.max_concurrent_inspects, - 0 AS processed_inputs - FROM application a - INNER JOIN execution_parameters e - ON a.id = e.application_id - WHERE a.status = 'RUNNING'; - ` - - // Prepare the result slice - var machineConfigs []*MachineConfig - - // Execute the query - rows, err := repo.db.Query(ctx, query) - if err != nil { - return nil, fmt.Errorf("failed to execute query: %w", err) - } - defer rows.Close() - - // Iterate over the result rows - for rows.Next() { - var mc MachineConfig - - // Scan the database values into the MachineConfig struct fields - err := rows.Scan( - &mc.AppAddress, // contract_address - &mc.SnapshotPath, // template_uri - &mc.AdvanceIncCycles, // advance_inc_cycles - &mc.AdvanceMaxCycles, // advance_max_cycles - &mc.InspectIncCycles, // inspect_inc_cycles - &mc.InspectMaxCycles, // inspect_max_cycles - &mc.AdvanceIncDeadline, // advance_inc_deadline - &mc.AdvanceMaxDeadline, // advance_max_deadline - &mc.InspectIncDeadline, // inspect_inc_deadline - &mc.InspectMaxDeadline, // inspect_max_deadline - &mc.LoadDeadline, // load_deadline - &mc.StoreDeadline, // store_deadline - &mc.FastDeadline, // fast_deadline - &mc.MaxConcurrentInspects, // max_concurrent_inspects - &mc.ProcessedInputs, // processed_inputs - ) - if err != nil { 
- return nil, fmt.Errorf("failed to scan row: %w", err) - } - - // Append the result to the slice - machineConfigs = append(machineConfigs, &mc) - } - - // Check if any error occurred during iteration - if rows.Err() != nil { - return nil, fmt.Errorf("row iteration error: %w", rows.Err()) - } - - // Return the result slice - return machineConfigs, nil -} - -func (repo *Database) GetProcessedInputs( - ctx context.Context, - app Address, - index uint64, -) ([]*Input, error) { - query := ` - SELECT id, index, status, raw_data, epoch_id - FROM input - WHERE application_address = @applicationAddress - AND index >= @index - AND status != 'NONE' - ORDER BY index ASC - ` - args := pgx.NamedArgs{ - "applicationAddress": app, - "index": index, - } - rows, err := repo.db.Query(ctx, query, args) - if err != nil { - return nil, fmt.Errorf("%w (failed querying inputs): %w", ErrAdvancerRepository, err) - } - - res := []*Input{} - var input Input - scans := []any{&input.Id, &input.Index, &input.CompletionStatus, &input.RawData, &input.EpochId} - _, err = pgx.ForEachRow(rows, scans, func() error { - input := input - res = append(res, &input) - return nil - }) - if err != nil { - return nil, fmt.Errorf("%w (failed reading input rows): %w", ErrAdvancerRepository, err) - } - - return res, nil -} - -func (repo *Database) GetUnprocessedInputs( - ctx context.Context, - apps []Address, -) (map[Address][]*Input, error) { - result := map[Address][]*Input{} - if len(apps) == 0 { - return result, nil - } - - query := fmt.Sprintf(` - SELECT id, application_address, raw_data, index, epoch_id - FROM input - WHERE status = 'NONE' - AND application_address IN %s - ORDER BY index ASC, application_address - `, addressesToSqlInValues(apps)) // NOTE: not sanitized - rows, err := repo.db.Query(ctx, query) - if err != nil { - return nil, fmt.Errorf("%w (failed querying inputs): %w", ErrAdvancerRepository, err) - } - - var input Input - scans := []any{&input.Id, &input.AppAddress, &input.RawData, 
&input.Index, &input.EpochId} - _, err = pgx.ForEachRow(rows, scans, func() error { - input := input - if _, ok := result[input.AppAddress]; ok { //nolint:gosimple - result[input.AppAddress] = append(result[input.AppAddress], &input) - } else { - result[input.AppAddress] = []*Input{&input} - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("%w (failed reading input rows): %w", ErrAdvancerRepository, err) - } - - return result, nil -} - -func (repo *Database) StoreAdvanceResult( - ctx context.Context, - input *Input, - res *nodemachine.AdvanceResult, -) error { - tx, err := repo.db.Begin(ctx) - if err != nil { - return errors.Join(ErrBeginTx, err) - } - - // Inserts the outputs. - nextOutputIndex, err := repo.getNextIndex(ctx, tx, "output", input.AppAddress) - if err != nil { - return err - } - err = repo.insert(ctx, tx, "output", res.Outputs, input.Id, nextOutputIndex) - if err != nil { - return err - } - - // Inserts the reports. - nextReportIndex, err := repo.getNextIndex(ctx, tx, "report", input.AppAddress) - if err != nil { - return err - } - err = repo.insert(ctx, tx, "report", res.Reports, input.Id, nextReportIndex) - if err != nil { - return err - } - - // Updates the input's status. 
- err = repo.updateInput(ctx, tx, input.Id, res.Status, res.OutputsHash, res.MachineHash) - if err != nil { - return err - } - - err = tx.Commit(ctx) - if err != nil { - return errors.Join(ErrCommitTx, err, tx.Rollback(ctx)) - } - - return nil -} - -func (repo *Database) UpdateClosedEpochs(ctx context.Context, app Address) error { - query := ` - UPDATE epoch - SET status = 'PROCESSED_ALL_INPUTS' - WHERE id IN (( - SELECT DISTINCT epoch.id - FROM epoch INNER JOIN input ON (epoch.id = input.epoch_id) - WHERE epoch.application_address = @applicationAddress - AND epoch.status = 'CLOSED' - AND input.status != 'NONE' - ) EXCEPT ( - SELECT DISTINCT epoch.id - FROM epoch INNER JOIN input ON (epoch.id = input.epoch_id) - WHERE epoch.application_address = @applicationAddress - AND epoch.status = 'CLOSED' - AND input.status = 'NONE')) - ` - args := pgx.NamedArgs{"applicationAddress": app} - _, err := repo.db.Exec(ctx, query, args) - if err != nil { - return errors.Join(ErrUpdateRow, err) - } - return nil -} - -// ------------------------------------------------------------------------------------------------ - -func (_ *Database) getNextIndex( - ctx context.Context, - tx pgx.Tx, - tableName string, - appAddress Address, -) (uint64, error) { - var nextIndex uint64 - query := fmt.Sprintf(` - SELECT COALESCE(MAX(%s.index) + 1, 0) - FROM input INNER JOIN %s ON input.id = %s.input_id - WHERE input.status = 'ACCEPTED' - AND input.application_address = $1 - `, tableName, tableName, tableName) - err := tx.QueryRow(ctx, query, appAddress).Scan(&nextIndex) - if err != nil { - err = fmt.Errorf("failed to get the next %s index: %w", tableName, err) - return 0, errors.Join(err, tx.Rollback(ctx)) - } - return nextIndex, nil -} - -func (_ *Database) insert( - ctx context.Context, - tx pgx.Tx, - tableName string, - dataArray [][]byte, - inputId uint64, - nextIndex uint64, -) error { - lenOutputs := int64(len(dataArray)) - if lenOutputs < 1 { - return nil - } - - rows := [][]any{} - for i, 
data := range dataArray { - rows = append(rows, []any{inputId, nextIndex + uint64(i), data}) - } - - count, err := tx.CopyFrom( - ctx, - pgx.Identifier{tableName}, - []string{"input_id", "index", "raw_data"}, - pgx.CopyFromRows(rows), - ) - if err != nil { - return errors.Join(ErrCopyFrom, err, tx.Rollback(ctx)) - } - if lenOutputs != count { - err := fmt.Errorf("not all %ss were inserted (%d != %d)", tableName, lenOutputs, count) - return errors.Join(err, tx.Rollback(ctx)) - } - - return nil -} - -func (_ *Database) updateInput( - ctx context.Context, - tx pgx.Tx, - inputId uint64, - status InputCompletionStatus, - outputsHash Hash, - machineHash *Hash, -) error { - query := ` - UPDATE input - SET (status, outputs_hash, machine_hash) = (@status, @outputsHash, @machineHash) - WHERE id = @id - ` - args := pgx.NamedArgs{ - "status": status, - "outputsHash": outputsHash, - "machineHash": machineHash, - "id": inputId, - } - _, err := tx.Exec(ctx, query, args) - if err != nil { - return errors.Join(ErrUpdateRow, err, tx.Rollback(ctx)) - } - return nil -} - -// ------------------------------------------------------------------------------------------------ - -func addressesToSqlInValues[T fmt.Stringer](a []T) string { - s := []string{} - for _, x := range a { - s = append(s, fmt.Sprintf("'\\x%s'", x.String()[2:])) - } - return fmt.Sprintf("(%s)", strings.Join(s, ", ")) -} diff --git a/internal/repository/machine_test.go b/internal/repository/machine_test.go deleted file mode 100644 index b40f86b14..000000000 --- a/internal/repository/machine_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -package repository - -import ( - "context" - "log/slog" - "testing" - - . 
"github.com/cartesi/rollups-node/internal/model" - - "github.com/cartesi/rollups-node/pkg/rollupsmachine" - "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum/common" - - "github.com/cartesi/rollups-node/test/tooling/db" - "github.com/stretchr/testify/require" -) - -func TestMachineRepository(t *testing.T) { - ctx := context.Background() - - t.Run("GetMachineConfigurations", func(t *testing.T) { - require := require.New(t) - - var err error - endpoint, err := db.GetPostgresTestEndpoint() - require.Nil(err) - - err = db.SetupTestPostgres(endpoint) - require.Nil(err) - - database, err := Connect(ctx, endpoint, service.NewLogger(slog.LevelDebug, true)) - require.Nil(err) - require.NotNil(database) - - apps, _, _, _, err := populate2(database) - require.Nil(err) - require.Len(apps, 3) - - // only running apps - res, err := database.GetMachineConfigurations(ctx) - require.Nil(err) - require.Len(res, 2) - - var config1, config2 *MachineConfig - for _, config := range res { - if config.AppAddress == apps[1].ContractAddress { - config2 = config - } else if config.AppAddress == apps[2].ContractAddress { - config1 = config - } - } - require.NotNil(config1) - require.NotNil(config2) - - require.Equal(apps[1].ContractAddress, config2.AppAddress) - require.Equal(uint64(0), config2.ProcessedInputs) - require.Equal("path/to/template/uri/1", config2.SnapshotPath) - - require.Equal(apps[2].ContractAddress, config1.AppAddress) - require.Equal(uint64(0), config1.ProcessedInputs) - require.Equal("path/to/template/uri/2", config1.SnapshotPath) - }) - - t.Run("GetProcessedInputs", func(t *testing.T) { - t.Skip("TODO") - }) - - t.Run("GetUnprocessedInputs", func(t *testing.T) { - t.Skip("TODO") - }) - - t.Run("StoreAdvanceResult", func(t *testing.T) { - t.Skip("TODO") - }) - - t.Run("UpdateEpochs", func(t *testing.T) { - require := require.New(t) - - var err error - endpoint, err := db.GetPostgresTestEndpoint() - require.Nil(err) - - err = 
db.SetupTestPostgres(endpoint) - require.Nil(err) - - database, err := Connect(ctx, endpoint, service.NewLogger(slog.LevelDebug, true)) - require.Nil(err) - require.NotNil(database) - - app, _, _, err := populate1(database) - require.Nil(err) - - err = database.UpdateClosedEpochs(ctx, app.ContractAddress) - require.Nil(err) - - epoch0, err := database.GetEpoch(ctx, 0, app.ContractAddress) - require.Nil(err) - require.NotNil(epoch0) - - epoch1, err := database.GetEpoch(ctx, 1, app.ContractAddress) - require.Nil(err) - require.NotNil(epoch1) - - epoch2, err := database.GetEpoch(ctx, 2, app.ContractAddress) - require.Nil(err) - require.NotNil(epoch2) - - epoch3, err := database.GetEpoch(ctx, 3, app.ContractAddress) - require.Nil(err) - require.NotNil(epoch3) - - require.Equal(EpochStatusProcessedAllInputs, epoch0.Status) - require.Equal(EpochStatusProcessedAllInputs, epoch1.Status) - require.Equal(EpochStatusClosed, epoch2.Status) - require.Equal(EpochStatusOpen, epoch3.Status) - }) -} - -// ------------------------------------------------------------------------------------------------ - -func populate1(database *Database) (*Application, []*Epoch, []*Input, error) { - ctx := context.Background() - - app := &Application{ - ContractAddress: common.HexToAddress("deadbeef"), - IConsensusAddress: common.HexToAddress("beefdead"), - TemplateHash: [32]byte{}, - LastProcessedBlock: 0, - Status: "RUNNING", - } - - _, err := database.InsertApplication(ctx, app) - if err != nil { - return nil, nil, nil, err - } - - epochs := []*Epoch{{ - FirstBlock: 0, - LastBlock: 1, - Status: EpochStatusClosed, - }, { - FirstBlock: 2, - LastBlock: 3, - Status: EpochStatusClosed, - }, { - FirstBlock: 4, - LastBlock: 5, - Status: EpochStatusClosed, - }, { - FirstBlock: 6, - LastBlock: 7, - Status: EpochStatusOpen, - }} - - for i, epoch := range epochs { - epoch.Index = uint64(i) - epoch.AppAddress = app.ContractAddress - epoch.Id, err = database.InsertEpoch(ctx, epoch) - if err != nil { - return 
nil, nil, nil, err - } - } - - inputs := []*Input{{ - EpochId: epochs[0].Id, - CompletionStatus: InputStatusAccepted, - RawData: []byte("first input"), - }, { - EpochId: epochs[0].Id, - CompletionStatus: InputStatusRejected, - RawData: []byte("second input"), - }, { - EpochId: epochs[1].Id, - CompletionStatus: InputStatusException, - RawData: []byte("third input"), - }, { - EpochId: epochs[1].Id, - CompletionStatus: InputStatusAccepted, - RawData: []byte("fourth input"), - }, { - EpochId: epochs[2].Id, - CompletionStatus: InputStatusAccepted, - RawData: []byte("fifth input"), - }, { - EpochId: epochs[2].Id, - CompletionStatus: InputStatusNone, - RawData: []byte("sixth input"), - }, { - EpochId: epochs[3].Id, - CompletionStatus: InputStatusNone, - RawData: []byte("seventh input"), - }} - - for i, input := range inputs { - input.Index = uint64(i) - input.BlockNumber = uint64(i) - input.AppAddress = app.ContractAddress - - input.RawData, err = rollupsmachine.Input{Data: input.RawData}.Encode() - if err != nil { - return nil, nil, nil, err - } - - input.Id, err = database.InsertInput(ctx, input) - if err != nil { - return nil, nil, nil, err - } - } - - return app, epochs, inputs, nil -} - -// ------------------------------------------------------------------------------------------------ - -func populate2(database *Database) ([]*Application, []*Epoch, []*Input, []*Snapshot, error) { - ctx := context.Background() - - apps := []*Application{{ - ContractAddress: common.HexToAddress("dead"), - TemplateUri: "path/to/template/uri/0", - Status: ApplicationStatusNotRunning, - }, { - ContractAddress: common.HexToAddress("beef"), - TemplateUri: "path/to/template/uri/1", - Status: ApplicationStatusRunning, - }, { - ContractAddress: common.HexToAddress("bead"), - TemplateUri: "path/to/template/uri/2", - Status: ApplicationStatusRunning, - }} - if err := database.InsertApps(ctx, apps); err != nil { - return nil, nil, nil, nil, err - } - - epochs := []*Epoch{{ - Index: 0, - Status: 
EpochStatusClosed, - AppAddress: apps[1].ContractAddress, - }, { - Index: 1, - Status: EpochStatusClosed, - AppAddress: apps[1].ContractAddress, - }, { - Status: EpochStatusClosed, - AppAddress: apps[2].ContractAddress, - }} - err := database.InsertEpochs(ctx, epochs) - if err != nil { - return nil, nil, nil, nil, err - } - - inputs := []*Input{{ - Index: 0, - CompletionStatus: InputStatusAccepted, - RawData: []byte("first"), - AppAddress: apps[1].ContractAddress, - EpochId: epochs[0].Id, - }, { - Index: 1, - CompletionStatus: InputStatusAccepted, - RawData: []byte("second"), - AppAddress: apps[1].ContractAddress, - EpochId: epochs[1].Id, - }} - err = database.InsertInputs(ctx, inputs) - if err != nil { - return nil, nil, nil, nil, err - } - - snapshots := []*Snapshot{{ - URI: "path/to/snapshot/1", - InputId: inputs[0].Id, - AppAddress: apps[1].ContractAddress, - }, { - URI: "path/to/snapshot/2", - InputId: inputs[1].Id, - AppAddress: apps[1].ContractAddress, - }} - err = database.InsertSnapshots(ctx, snapshots) - if err != nil { - return nil, nil, nil, nil, err - } - - return apps, epochs, inputs, snapshots, nil -} - -// ------------------------------------------------------------------------------------------------ - -func (pg *Database) InsertApps(ctx context.Context, apps []*Application) error { - var err error - for _, app := range apps { - app.Id, err = pg.InsertApplication(ctx, app) - if err != nil { - return err - } - } - return nil -} - -func (pg *Database) InsertEpochs(ctx context.Context, epochs []*Epoch) error { - var err error - for _, epoch := range epochs { - epoch.Id, err = pg.InsertEpoch(ctx, epoch) - if err != nil { - return err - } - } - return nil -} - -func (pg *Database) InsertInputs(ctx context.Context, inputs []*Input) error { - var err error - for _, input := range inputs { - input.Id, err = pg.InsertInput(ctx, input) - if err != nil { - return err - } - } - return nil -} - -func (pg *Database) InsertSnapshots(ctx context.Context, snapshots 
[]*Snapshot) error { - var err error - for _, snapshot := range snapshots { - snapshot.Id, err = pg.InsertSnapshot(ctx, snapshot) - if err != nil { - return err - } - } - return nil -} diff --git a/internal/repository/postgres/application.go b/internal/repository/postgres/application.go new file mode 100644 index 000000000..04b724560 --- /dev/null +++ b/internal/repository/postgres/application.go @@ -0,0 +1,499 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ ApplicationRepository Methods ------------------------ // + +func (r *postgresRepository) CreateApplication( + ctx context.Context, + app *model.Application, +) (int64, error) { + + insertStmt := table.Application. + INSERT( + table.Application.Name, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + table.Application.TemplateHash, + table.Application.TemplateURI, + table.Application.EpochLength, + table.Application.State, + table.Application.LastProcessedBlock, + table.Application.LastClaimCheckBlock, + table.Application.LastOutputCheckBlock, + table.Application.ProcessedInputs, + ). + VALUES( + app.Name, + app.IApplicationAddress, + app.IConsensusAddress, + app.TemplateHash, + app.TemplateURI, + app.EpochLength, + app.State, + app.LastProcessedBlock, + app.LastClaimCheckBlock, + app.LastOutputCheckBlock, + app.ProcessedInputs, + ). 
+ RETURNING(table.Application.ID) + + tx, err := r.db.Begin(ctx) + if err != nil { + return 0, err + } + + sqlStr, args := insertStmt.Sql() + var newID int64 + err = tx.QueryRow(ctx, sqlStr, args...).Scan(&newID) + if err != nil { + return 0, errors.Join(err, tx.Rollback(ctx)) + } + + sqlStr, args = table.ExecutionParameters. + INSERT( + table.ExecutionParameters.ApplicationID, + ). + VALUES( + newID, + ).Sql() + + _, err = tx.Exec(ctx, sqlStr, args...) + if err != nil { + return 0, errors.Join(err, tx.Rollback(ctx)) + } + + err = tx.Commit(ctx) + if err != nil { + return 0, errors.Join(err, tx.Rollback(ctx)) + } + return newID, nil +} + +// GetApplication retrieves one application by ID, optionally loading status & execution parameters. +func (r *postgresRepository) GetApplication( + ctx context.Context, + nameOrAddress string, +) (*model.Application, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + stmt := table.Application. 
+ SELECT( + table.Application.ID, + table.Application.Name, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + table.Application.TemplateHash, + table.Application.TemplateURI, + table.Application.EpochLength, + table.Application.State, + table.Application.Reason, + table.Application.LastProcessedBlock, + table.Application.LastClaimCheckBlock, + table.Application.LastOutputCheckBlock, + table.Application.ProcessedInputs, + table.Application.CreatedAt, + table.Application.UpdatedAt, + table.ExecutionParameters.ApplicationID, + table.ExecutionParameters.SnapshotPolicy, + table.ExecutionParameters.SnapshotRetention, + table.ExecutionParameters.AdvanceIncCycles, + table.ExecutionParameters.AdvanceMaxCycles, + table.ExecutionParameters.InspectIncCycles, + table.ExecutionParameters.InspectMaxCycles, + table.ExecutionParameters.AdvanceIncDeadline, + table.ExecutionParameters.AdvanceMaxDeadline, + table.ExecutionParameters.InspectIncDeadline, + table.ExecutionParameters.InspectMaxDeadline, + table.ExecutionParameters.LoadDeadline, + table.ExecutionParameters.StoreDeadline, + table.ExecutionParameters.FastDeadline, + table.ExecutionParameters.MaxConcurrentInspects, + table.ExecutionParameters.CreatedAt, + table.ExecutionParameters.UpdatedAt, + ). + FROM( + table.Application.INNER_JOIN( + table.ExecutionParameters, + table.ExecutionParameters.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE(whereClause) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) 
+ + var app model.Application + err = row.Scan( + &app.ID, + &app.Name, + &app.IApplicationAddress, + &app.IConsensusAddress, + &app.TemplateHash, + &app.TemplateURI, + &app.EpochLength, + &app.State, + &app.Reason, + &app.LastProcessedBlock, + &app.LastClaimCheckBlock, + &app.LastOutputCheckBlock, + &app.ProcessedInputs, + &app.CreatedAt, + &app.UpdatedAt, + &app.ExecutionParameters.ApplicationID, + &app.ExecutionParameters.SnapshotPolicy, + &app.ExecutionParameters.SnapshotRetention, + &app.ExecutionParameters.AdvanceIncCycles, + &app.ExecutionParameters.AdvanceMaxCycles, + &app.ExecutionParameters.InspectIncCycles, + &app.ExecutionParameters.InspectMaxCycles, + &app.ExecutionParameters.AdvanceIncDeadline, + &app.ExecutionParameters.AdvanceMaxDeadline, + &app.ExecutionParameters.InspectIncDeadline, + &app.ExecutionParameters.InspectMaxDeadline, + &app.ExecutionParameters.LoadDeadline, + &app.ExecutionParameters.StoreDeadline, + &app.ExecutionParameters.FastDeadline, + &app.ExecutionParameters.MaxConcurrentInspects, + &app.ExecutionParameters.CreatedAt, + &app.ExecutionParameters.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil // not found + } + if err != nil { + return nil, err + } + + return &app, nil +} + +// UpdateApplication updates an existing application row. +func (r *postgresRepository) UpdateApplication( + ctx context.Context, + app *model.Application, +) error { + + updateStmt := table.Application. + UPDATE( + table.Application.Name, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + table.Application.TemplateHash, + table.Application.TemplateURI, + table.Application.EpochLength, + table.Application.State, + table.Application.Reason, + table.Application.LastProcessedBlock, + table.Application.LastClaimCheckBlock, + table.Application.LastOutputCheckBlock, + table.Application.ProcessedInputs, + ). 
+ SET( + app.Name, + app.IApplicationAddress, + app.IConsensusAddress, + app.TemplateHash, + app.TemplateURI, + app.EpochLength, + app.State, + app.Reason, + app.LastProcessedBlock, + app.LastClaimCheckBlock, + app.LastOutputCheckBlock, + app.ProcessedInputs, + ). + WHERE(table.Application.ID.EQ(postgres.Int(app.ID))) + + sqlStr, args := updateStmt.Sql() + _, err := r.db.Exec(ctx, sqlStr, args...) + return err +} + +func (r *postgresRepository) UpdateApplicationState( + ctx context.Context, + app *model.Application, +) error { + + updateStmt := table.Application. + UPDATE( + table.Application.State, + table.Application.Reason, + ). + SET( + app.State, + app.Reason, + ). + WHERE(table.Application.ID.EQ(postgres.Int(app.ID))) + + sqlStr, args := updateStmt.Sql() + _, err := r.db.Exec(ctx, sqlStr, args...) + return err +} + +// DeleteApplication removes the row from "application" by ID. +func (r *postgresRepository) DeleteApplication( + ctx context.Context, + id int64, +) error { + + delStmt := table.Application. + DELETE(). + WHERE(table.Application.ID.EQ(postgres.Int(id))) + + sqlStr, args := delStmt.Sql() + _, err := r.db.Exec(ctx, sqlStr, args...) + return err +} + +// ListApplications queries multiple apps with optional filters & pagination. +func (r *postgresRepository) ListApplications( + ctx context.Context, + f repository.ApplicationFilter, + p repository.Pagination, +) ([]*model.Application, error) { + + sel := table.Application. 
+ SELECT( + table.Application.ID, + table.Application.Name, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + table.Application.TemplateHash, + table.Application.TemplateURI, + table.Application.EpochLength, + table.Application.State, + table.Application.Reason, + table.Application.LastProcessedBlock, + table.Application.LastClaimCheckBlock, + table.Application.LastOutputCheckBlock, + table.Application.ProcessedInputs, + table.Application.CreatedAt, + table.Application.UpdatedAt, + table.ExecutionParameters.ApplicationID, + table.ExecutionParameters.SnapshotPolicy, + table.ExecutionParameters.SnapshotRetention, + table.ExecutionParameters.AdvanceIncCycles, + table.ExecutionParameters.AdvanceMaxCycles, + table.ExecutionParameters.InspectIncCycles, + table.ExecutionParameters.InspectMaxCycles, + table.ExecutionParameters.AdvanceIncDeadline, + table.ExecutionParameters.AdvanceMaxDeadline, + table.ExecutionParameters.InspectIncDeadline, + table.ExecutionParameters.InspectMaxDeadline, + table.ExecutionParameters.LoadDeadline, + table.ExecutionParameters.StoreDeadline, + table.ExecutionParameters.FastDeadline, + table.ExecutionParameters.MaxConcurrentInspects, + table.ExecutionParameters.CreatedAt, + table.ExecutionParameters.UpdatedAt, + ). 
+	FROM(
+		table.Application.INNER_JOIN(
+			table.ExecutionParameters,
+			table.ExecutionParameters.ApplicationID.EQ(table.Application.ID),
+		),
+	)
+
+	conditions := []postgres.BoolExpression{}
+	if f.State != nil {
+		conditions = append(conditions, table.Application.State.EQ(postgres.NewEnumValue(f.State.String())))
+	}
+
+	if f.Name != nil {
+		conditions = append(conditions, table.Application.Name.EQ(postgres.VarChar()(*f.Name)))
+	}
+
+	if len(conditions) > 0 {
+		sel = sel.WHERE(postgres.AND(conditions...))
+	}
+
+	sel = sel.ORDER_BY(table.Application.Name.ASC())
+
+	// Apply pagination
+	if p.Limit > 0 {
+		sel = sel.LIMIT(p.Limit)
+	}
+	if p.Offset > 0 {
+		sel = sel.OFFSET(p.Offset)
+	}
+
+	sqlStr, args := sel.Sql()
+	rows, err := r.db.Query(ctx, sqlStr, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var apps []*model.Application
+	for rows.Next() {
+		var app model.Application
+		err := rows.Scan(
+			&app.ID,
+			&app.Name,
+			&app.IApplicationAddress,
+			&app.IConsensusAddress,
+			&app.TemplateHash,
+			&app.TemplateURI,
+			&app.EpochLength,
+			&app.State,
+			&app.Reason,
+			&app.LastProcessedBlock,
+			&app.LastClaimCheckBlock,
+			&app.LastOutputCheckBlock,
+			&app.ProcessedInputs,
+			&app.CreatedAt,
+			&app.UpdatedAt,
+			&app.ExecutionParameters.ApplicationID,
+			&app.ExecutionParameters.SnapshotPolicy,
+			&app.ExecutionParameters.SnapshotRetention,
+			&app.ExecutionParameters.AdvanceIncCycles,
+			&app.ExecutionParameters.AdvanceMaxCycles,
+			&app.ExecutionParameters.InspectIncCycles,
+			&app.ExecutionParameters.InspectMaxCycles,
+			&app.ExecutionParameters.AdvanceIncDeadline,
+			&app.ExecutionParameters.AdvanceMaxDeadline,
+			&app.ExecutionParameters.InspectIncDeadline,
+			&app.ExecutionParameters.InspectMaxDeadline,
+			&app.ExecutionParameters.LoadDeadline,
+			&app.ExecutionParameters.StoreDeadline,
+			&app.ExecutionParameters.FastDeadline,
+			&app.ExecutionParameters.MaxConcurrentInspects,
+			&app.ExecutionParameters.CreatedAt,
+			&app.ExecutionParameters.UpdatedAt,
+		)
+
if err != nil { + return nil, err + } + apps = append(apps, &app) + } + + return apps, nil +} + +func (r *postgresRepository) GetExecutionParameters( + ctx context.Context, + applicationID int64, +) (*model.ExecutionParameters, error) { + + stmt := table.ExecutionParameters. + SELECT( + table.ExecutionParameters.ApplicationID, + table.ExecutionParameters.SnapshotPolicy, + table.ExecutionParameters.SnapshotRetention, + table.ExecutionParameters.AdvanceIncCycles, + table.ExecutionParameters.AdvanceMaxCycles, + table.ExecutionParameters.InspectIncCycles, + table.ExecutionParameters.InspectMaxCycles, + table.ExecutionParameters.AdvanceIncDeadline, + table.ExecutionParameters.AdvanceMaxDeadline, + table.ExecutionParameters.InspectIncDeadline, + table.ExecutionParameters.InspectMaxDeadline, + table.ExecutionParameters.LoadDeadline, + table.ExecutionParameters.StoreDeadline, + table.ExecutionParameters.FastDeadline, + table.ExecutionParameters.MaxConcurrentInspects, + table.ExecutionParameters.CreatedAt, + table.ExecutionParameters.UpdatedAt, + ). + WHERE(table.ExecutionParameters.ApplicationID.EQ(postgres.Int(applicationID))) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var ep model.ExecutionParameters + err := row.Scan( + &ep.ApplicationID, + &ep.SnapshotPolicy, + &ep.SnapshotRetention, + &ep.AdvanceIncCycles, + &ep.AdvanceMaxCycles, + &ep.InspectIncCycles, + &ep.InspectMaxCycles, + &ep.AdvanceIncDeadline, + &ep.AdvanceMaxDeadline, + &ep.InspectIncDeadline, + &ep.InspectMaxDeadline, + &ep.LoadDeadline, + &ep.StoreDeadline, + &ep.FastDeadline, + &ep.MaxConcurrentInspects, + &ep.CreatedAt, + &ep.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil // not found + } + if err != nil { + return nil, err + } + return &ep, nil +} + +func (r *postgresRepository) UpdateExecutionParameters( + ctx context.Context, + ep *model.ExecutionParameters, +) error { + + upd := table.ExecutionParameters. 
+ UPDATE( + table.ExecutionParameters.SnapshotPolicy, + table.ExecutionParameters.SnapshotRetention, + table.ExecutionParameters.AdvanceIncCycles, + table.ExecutionParameters.AdvanceMaxCycles, + table.ExecutionParameters.InspectIncCycles, + table.ExecutionParameters.InspectMaxCycles, + table.ExecutionParameters.AdvanceIncDeadline, + table.ExecutionParameters.AdvanceMaxDeadline, + table.ExecutionParameters.InspectIncDeadline, + table.ExecutionParameters.InspectMaxDeadline, + table.ExecutionParameters.LoadDeadline, + table.ExecutionParameters.StoreDeadline, + table.ExecutionParameters.FastDeadline, + table.ExecutionParameters.MaxConcurrentInspects, + ). + SET( + ep.SnapshotPolicy, + ep.SnapshotRetention, + ep.AdvanceIncCycles, + ep.AdvanceMaxCycles, + ep.InspectIncCycles, + ep.InspectMaxCycles, + ep.AdvanceIncDeadline, + ep.AdvanceMaxDeadline, + ep.InspectIncDeadline, + ep.InspectMaxDeadline, + ep.LoadDeadline, + ep.StoreDeadline, + ep.FastDeadline, + ep.MaxConcurrentInspects, + ). + WHERE(table.ExecutionParameters.ApplicationID.EQ(postgres.Int(ep.ApplicationID))) + + sqlStr, args := upd.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} diff --git a/internal/repository/postgres/bulk.go b/internal/repository/postgres/bulk.go new file mode 100644 index 000000000..dc03e6305 --- /dev/null +++ b/internal/repository/postgres/bulk.go @@ -0,0 +1,397 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +func encodeSiblings(outputHashesSiblings []common.Hash) ([]byte, error) { + // 1) Make a slice of []byte + arr := make([][]byte, 0, len(outputHashesSiblings)) + for _, h := range outputHashesSiblings { + // h is [32]byte + // we must copy it into a slice of bytes + copyH := make([]byte, len(h)) + copy(copyH, h[:]) + arr = append(arr, copyH) + } + + // 2) Use pgtype.ByteaArray and call Set with [][]byte + var siblings pgtype.ByteaArray + if err := siblings.Set(arr); err != nil { + return nil, fmt.Errorf("failed to set ByteaArray: %w", err) + } + + // 3) Encode it as text (the Postgres array string, e.g. '{\\x...,\\x..., ...}') + encoded, err := siblings.EncodeText(nil, nil) + if err != nil { + return nil, fmt.Errorf("failed to encode ByteaArray: %w", err) + } + + return encoded, nil +} + +func getOutputNextIndex( + ctx context.Context, + tx pgx.Tx, + appID int64, +) (uint64, error) { + + query := table.Output.SELECT( + postgres.COALESCE( + postgres.Float(1).ADD(postgres.MAXf(table.Output.Index)), + postgres.Float(0), + ), + ).FROM( + table.Output.INNER_JOIN(table.Input, table.Input.EpochApplicationID.EQ(table.Output.InputEpochApplicationID). 
+			AND(table.Input.Index.EQ(table.Output.InputIndex))),
+	).WHERE(
+		table.Output.InputEpochApplicationID.EQ(postgres.Int64(appID)).
+			AND(table.Input.Status.EQ(postgres.NewEnumValue(model.InputCompletionStatus_Accepted.String()))),
+	)
+
+	queryStr, args := query.Sql()
+	var currentIndex uint64
+	err := tx.QueryRow(ctx, queryStr, args...).Scan(&currentIndex)
+	if err != nil {
+		err = fmt.Errorf("failed to get the next output index: %w", err)
+		return 0, errors.Join(err, tx.Rollback(ctx))
+	}
+	return currentIndex, nil
+}
+
+func getReportNextIndex(
+	ctx context.Context,
+	tx pgx.Tx,
+	appID int64,
+) (uint64, error) {
+
+	query := table.Report.SELECT(
+		postgres.COALESCE(
+			postgres.Float(1).ADD(postgres.MAXf(table.Report.Index)),
+			postgres.Float(0),
+		),
+	).FROM(
+		table.Report.INNER_JOIN(table.Input, table.Input.EpochApplicationID.EQ(table.Report.InputEpochApplicationID).AND(table.Input.Index.EQ(table.Report.InputIndex))),
+	).WHERE(
+		table.Report.InputEpochApplicationID.EQ(postgres.Int64(appID)).
+			AND(table.Input.Status.EQ(postgres.NewEnumValue(model.InputCompletionStatus_Accepted.String()))),
+	)
+
+	queryStr, args := query.Sql()
+	var currentIndex uint64
+	err := tx.QueryRow(ctx, queryStr, args...).Scan(&currentIndex)
+	if err != nil {
+		err = fmt.Errorf("failed to get the next report index: %w", err)
+		return 0, errors.Join(err, tx.Rollback(ctx))
+	}
+	return currentIndex, nil
+}
+
+func insertOutputs(
+	ctx context.Context,
+	tx pgx.Tx,
+	appID int64,
+	inputIndex uint64,
+	dataArray [][]byte,
+) error {
+	if len(dataArray) < 1 {
+		return nil
+	}
+
+	nextIndex, err := getOutputNextIndex(ctx, tx, appID)
+	if err != nil {
+		return err
+	}
+
+	stmt := table.Output.INSERT(
+		table.Output.InputEpochApplicationID,
+		table.Output.InputIndex,
+		table.Output.Index,
+		table.Output.RawData,
+	)
+	for i, data := range dataArray {
+		stmt = stmt.VALUES(
+			appID,
+			inputIndex,
+			nextIndex+uint64(i),
+			data,
+		)
+	}
+
+	sqlStr, args := stmt.Sql()
+	_, err = tx.Exec(ctx, sqlStr, args...)
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func insertReports( + ctx context.Context, + tx pgx.Tx, + appID int64, + inputIndex uint64, + dataArray [][]byte, +) error { + if len(dataArray) < 1 { + return nil + } + + nextIndex, err := getReportNextIndex(ctx, tx, appID) + if err != nil { + return err + } + + stmt := table.Report.INSERT( + table.Report.InputEpochApplicationID, + table.Report.InputIndex, + table.Report.Index, + table.Report.RawData, + ) + for i, data := range dataArray { + stmt = stmt.VALUES( + appID, + inputIndex, + nextIndex+uint64(i), + data, + ) + } + + sqlStr, args := stmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func updateInput( + ctx context.Context, + tx pgx.Tx, + appID int64, + inputIndex uint64, + status model.InputCompletionStatus, + outputsHash common.Hash, + machineHash common.Hash, +) error { + + updStmt := table.Input. + UPDATE( + table.Input.Status, + table.Input.MachineHash, + table.Input.OutputsHash, + ). + SET( + status, + machineHash, + outputsHash, + ). + WHERE( + table.Input.EpochApplicationID.EQ(postgres.Int64(appID)). + AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func updateApp( + ctx context.Context, + tx pgx.Tx, + appID int64, + inputIndex uint64, +) error { + + updStmt := table.Application. + UPDATE( + table.Application.ProcessedInputs, + ). + SET( + postgres.RawFloat(fmt.Sprintf("%d", inputIndex+1)), + ). + WHERE( + table.Application.ID.EQ(postgres.Int64(appID)), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *postgresRepository) StoreAdvanceResult( + ctx context.Context, + appID int64, + res *model.AdvanceResult, +) error { + tx, err := r.db.Begin(ctx) + if err != nil { + return err + } + + err = insertOutputs(ctx, tx, appID, res.InputIndex, res.Outputs) + if err != nil { + return err + } + + err = insertReports(ctx, tx, appID, res.InputIndex, res.Reports) + if err != nil { + return err + } + + err = updateInput(ctx, tx, appID, res.InputIndex, res.Status, res.OutputsHash, *res.MachineHash) + if err != nil { + return err + } + + err = updateApp(ctx, tx, appID, res.InputIndex) + if err != nil { + return err + } + + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + return nil +} + +func updateEpochClaim( + ctx context.Context, + tx pgx.Tx, + e *model.Epoch, +) error { + + updStmt := table.Epoch. + UPDATE( + table.Epoch.ClaimHash, + table.Epoch.Status, + ). + SET( + e.ClaimHash, + postgres.NewEnumValue(model.EpochStatus_ClaimComputed.String()), + ). + WHERE( + table.Epoch.ApplicationID.EQ(postgres.Int64(e.ApplicationID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", e.Index)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join( + fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err), + tx.Rollback(ctx), + ) + } + if cmd.RowsAffected() != 1 { + return errors.Join( + fmt.Errorf("failed to update application %d epoch %d: no rows affected", e.ApplicationID, e.Index), + tx.Rollback(ctx), + ) + } + return nil +} + +func updateOutputs( + ctx context.Context, + tx pgx.Tx, + outputs []*model.Output, +) error { + for _, output := range outputs { + siblings, err := encodeSiblings(output.OutputHashesSiblings) + if err != nil { + return errors.Join( + fmt.Errorf("failed to serialize outputHashesSiblings for output '%d'. 
%w", output.Index, err), + tx.Rollback(ctx), + ) + } + + updStmt := table.Output. + UPDATE( + table.Output.Hash, + table.Output.OutputHashesSiblings, + ). + SET( + output.Hash, + siblings, + ). + WHERE( + table.Output.InputEpochApplicationID.EQ(postgres.Int64(output.InputEpochApplicationID)). + AND(table.Output.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", output.Index)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join( + fmt.Errorf("failed to insert proof for output '%d'. %w", output.Index, err), + tx.Rollback(ctx), + ) + } + if cmd.RowsAffected() == 0 { + return errors.Join( + fmt.Errorf( + "failed to insert proof for output '%d'. No rows affected", + output.Index, + ), + tx.Rollback(ctx), + ) + } + } + return nil +} + +func (r *postgresRepository) StoreClaimAndProofs(ctx context.Context, epoch *model.Epoch, outputs []*model.Output) error { + + tx, err := r.db.Begin(ctx) + if err != nil { + return fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err) + } + + err = updateEpochClaim(ctx, tx, epoch) + if err != nil { + return err + } + + err = updateOutputs(ctx, tx, outputs) + if err != nil { + return err + } + + err = tx.Commit(ctx) + if err != nil { + return errors.Join( + fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err), + tx.Rollback(ctx), + ) + } + return nil +} diff --git a/internal/repository/postgres/claimer.go b/internal/repository/postgres/claimer.go new file mode 100644 index 000000000..0b9fdb976 --- /dev/null +++ b/internal/repository/postgres/claimer.go @@ -0,0 +1,226 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + "github.com/jackc/pgx/v5" + + "github.com/cartesi/rollups-node/internal/model" + 
"github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +var ( + ErrNoUpdate = fmt.Errorf("update did not take effect") +) + +// Retrieve the computed claim of each application with the smallest index. +// The query may return either 0 or 1 entries per application. +func (r *postgresRepository) SelectOldestComputedClaimPerApp(ctx context.Context) ( + map[common.Address]*model.ClaimRow, + error, +) { + // NOTE(mpolitzer): DISTINCT ON is a postgres extension. To implement + // this in SQLite there is an alternative using GROUP BY and HAVING + // clauses instead. + stmt := table.Epoch.SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + ). + DISTINCT(table.Epoch.ApplicationID). + FROM( + table.Epoch. + INNER_JOIN( + table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_ClaimComputed.String())). + AND(table.Application.State.EQ(postgres.NewEnumValue(model.ApplicationState_Enabled.String()))), + ). + ORDER_BY( + table.Epoch.ApplicationID, + table.Epoch.Index.ASC(), + ) + + sqlStr, args := stmt.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + results := map[common.Address]*model.ClaimRow{} + for rows.Next() { + var cr model.ClaimRow + err := rows.Scan( + &cr.ApplicationID, + &cr.Index, + &cr.FirstBlock, + &cr.LastBlock, + &cr.ClaimHash, + &cr.ClaimTransactionHash, + &cr.Status, + &cr.VirtualIndex, + &cr.CreatedAt, + &cr.UpdatedAt, + &cr.IApplicationAddress, + &cr.IConsensusAddress, + ) + if err != nil { + return nil, err + } + results[cr.IApplicationAddress] = &cr + } + return results, nil +} + +// Retrieve the newest accepted claim of each application +func (r *postgresRepository) SelectNewestSubmittedOrAcceptedClaimPerApp(ctx context.Context) ( + map[common.Address]*model.ClaimRow, + error, +) { + // NOTE(mpolitzer): DISTINCT ON is a postgres extension. To implement + // this in SQLite there is an alternative using GROUP BY and HAVING + // clauses instead. + stmt := table.Epoch.SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + table.Application.IapplicationAddress, + table.Application.IconsensusAddress, + ). + DISTINCT(table.Epoch.ApplicationID). + FROM( + table.Epoch. + INNER_JOIN( + table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_ClaimSubmitted.String())). + OR(table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_ClaimAccepted.String()))). + AND(table.Application.State.EQ(postgres.NewEnumValue(model.ApplicationState_Enabled.String()))), + ). + ORDER_BY( + table.Epoch.ApplicationID, + table.Epoch.Index.DESC(), + ) + + sqlStr, args := stmt.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + results := map[common.Address]*model.ClaimRow{} + for rows.Next() { + var cr model.ClaimRow + err := rows.Scan( + &cr.ApplicationID, + &cr.Index, + &cr.FirstBlock, + &cr.LastBlock, + &cr.ClaimHash, + &cr.ClaimTransactionHash, + &cr.Status, + &cr.VirtualIndex, + &cr.CreatedAt, + &cr.UpdatedAt, + &cr.IApplicationAddress, + &cr.IConsensusAddress, + ) + if err != nil { + return nil, err + } + results[cr.IApplicationAddress] = &cr + } + return results, nil +} + +func (r *postgresRepository) SelectClaimPairsPerApp(ctx context.Context) ( + map[common.Address]*model.ClaimRow, + map[common.Address]*model.ClaimRow, + error, +) { + tx, err := r.db.BeginTx(ctx, pgx.TxOptions{ + IsoLevel: pgx.RepeatableRead, + AccessMode: pgx.ReadOnly, + }) + if err != nil { + return nil, nil, err + } + defer tx.Commit(ctx) + + computed, err := r.SelectOldestComputedClaimPerApp(ctx) + if err != nil { + return nil, nil, err + } + + accepted, err := r.SelectNewestSubmittedOrAcceptedClaimPerApp(ctx) + if err != nil { + return nil, nil, err + } + + return computed, accepted, err +} + +func (r *postgresRepository) UpdateEpochWithSubmittedClaim( + ctx context.Context, + application_id int64, + index uint64, + transaction_hash common.Hash, +) error { + updStmt := table.Epoch. + UPDATE( + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + ). + SET( + transaction_hash, + postgres.NewEnumValue(model.EpochStatus_ClaimSubmitted.String()), + ). + FROM( + table.Application, + ). + WHERE( + table.Epoch.ApplicationID.EQ(postgres.Int64(application_id)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", index)))). + AND(table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_ClaimComputed.String()))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return ErrNoUpdate + } + return nil +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/applicationstate.go b/internal/repository/postgres/db/rollupsdb/public/enum/applicationstate.go new file mode 100644 index 000000000..9b7965cf4 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/applicationstate.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var ApplicationState = &struct { + Enabled postgres.StringExpression + Disabled postgres.StringExpression + Inoperable postgres.StringExpression +}{ + Enabled: postgres.NewEnumValue("ENABLED"), + Disabled: postgres.NewEnumValue("DISABLED"), + Inoperable: postgres.NewEnumValue("INOPERABLE"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/defaultblock.go b/internal/repository/postgres/db/rollupsdb/public/enum/defaultblock.go new file mode 100644 index 000000000..9708d01c4 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/defaultblock.go @@ -0,0 +1,22 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var DefaultBlock = &struct { + Finalized postgres.StringExpression + Latest postgres.StringExpression + Pending postgres.StringExpression + Safe postgres.StringExpression +}{ + Finalized: postgres.NewEnumValue("FINALIZED"), + Latest: postgres.NewEnumValue("LATEST"), + Pending: postgres.NewEnumValue("PENDING"), + Safe: postgres.NewEnumValue("SAFE"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/epochstatus.go b/internal/repository/postgres/db/rollupsdb/public/enum/epochstatus.go new file mode 100644 index 000000000..b0b04f8cc --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/epochstatus.go @@ -0,0 +1,28 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var EpochStatus = &struct { + Open postgres.StringExpression + Closed postgres.StringExpression + InputsProcessed postgres.StringExpression + ClaimComputed postgres.StringExpression + ClaimSubmitted postgres.StringExpression + ClaimAccepted postgres.StringExpression + ClaimRejected postgres.StringExpression +}{ + Open: postgres.NewEnumValue("OPEN"), + Closed: postgres.NewEnumValue("CLOSED"), + InputsProcessed: postgres.NewEnumValue("INPUTS_PROCESSED"), + ClaimComputed: postgres.NewEnumValue("CLAIM_COMPUTED"), + ClaimSubmitted: postgres.NewEnumValue("CLAIM_SUBMITTED"), + ClaimAccepted: postgres.NewEnumValue("CLAIM_ACCEPTED"), + ClaimRejected: postgres.NewEnumValue("CLAIM_REJECTED"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/inputcompletionstatus.go b/internal/repository/postgres/db/rollupsdb/public/enum/inputcompletionstatus.go new file mode 100644 index 000000000..f18248333 --- 
/dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/inputcompletionstatus.go @@ -0,0 +1,32 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var InputCompletionStatus = &struct { + None postgres.StringExpression + Accepted postgres.StringExpression + Rejected postgres.StringExpression + Exception postgres.StringExpression + MachineHalted postgres.StringExpression + OutputsLimitExceeded postgres.StringExpression + CycleLimitExceeded postgres.StringExpression + TimeLimitExceeded postgres.StringExpression + PayloadLengthLimitExceeded postgres.StringExpression +}{ + None: postgres.NewEnumValue("NONE"), + Accepted: postgres.NewEnumValue("ACCEPTED"), + Rejected: postgres.NewEnumValue("REJECTED"), + Exception: postgres.NewEnumValue("EXCEPTION"), + MachineHalted: postgres.NewEnumValue("MACHINE_HALTED"), + OutputsLimitExceeded: postgres.NewEnumValue("OUTPUTS_LIMIT_EXCEEDED"), + CycleLimitExceeded: postgres.NewEnumValue("CYCLE_LIMIT_EXCEEDED"), + TimeLimitExceeded: postgres.NewEnumValue("TIME_LIMIT_EXCEEDED"), + PayloadLengthLimitExceeded: postgres.NewEnumValue("PAYLOAD_LENGTH_LIMIT_EXCEEDED"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/snapshotpolicy.go b/internal/repository/postgres/db/rollupsdb/public/enum/snapshotpolicy.go new file mode 100644 index 000000000..dbff9936d --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/snapshotpolicy.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var SnapshotPolicy = &struct { + None postgres.StringExpression + EachInput postgres.StringExpression + EachEpoch postgres.StringExpression +}{ + None: postgres.NewEnumValue("NONE"), + EachInput: postgres.NewEnumValue("EACH_INPUT"), + EachEpoch: postgres.NewEnumValue("EACH_EPOCH"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/application.go b/internal/repository/postgres/db/rollupsdb/public/table/application.go new file mode 100644 index 000000000..2163f7165 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/application.go @@ -0,0 +1,117 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Application = newApplicationTable("public", "application", "") + +type applicationTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + Name postgres.ColumnString + IapplicationAddress postgres.ColumnString + IconsensusAddress postgres.ColumnString + TemplateHash postgres.ColumnString + TemplateURI postgres.ColumnString + EpochLength postgres.ColumnFloat + State postgres.ColumnString + Reason postgres.ColumnString + LastProcessedBlock postgres.ColumnFloat + LastClaimCheckBlock postgres.ColumnFloat + LastOutputCheckBlock postgres.ColumnFloat + ProcessedInputs postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type ApplicationTable struct { + applicationTable + + EXCLUDED applicationTable +} + +// AS creates new ApplicationTable with assigned alias +func (a ApplicationTable) AS(alias string) *ApplicationTable { + return 
newApplicationTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new ApplicationTable with assigned schema name +func (a ApplicationTable) FromSchema(schemaName string) *ApplicationTable { + return newApplicationTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new ApplicationTable with assigned table prefix +func (a ApplicationTable) WithPrefix(prefix string) *ApplicationTable { + return newApplicationTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new ApplicationTable with assigned table suffix +func (a ApplicationTable) WithSuffix(suffix string) *ApplicationTable { + return newApplicationTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newApplicationTable(schemaName, tableName, alias string) *ApplicationTable { + return &ApplicationTable{ + applicationTable: newApplicationTableImpl(schemaName, tableName, alias), + EXCLUDED: newApplicationTableImpl("", "excluded", ""), + } +} + +func newApplicationTableImpl(schemaName, tableName, alias string) applicationTable { + var ( + IDColumn = postgres.IntegerColumn("id") + NameColumn = postgres.StringColumn("name") + IapplicationAddressColumn = postgres.StringColumn("iapplication_address") + IconsensusAddressColumn = postgres.StringColumn("iconsensus_address") + TemplateHashColumn = postgres.StringColumn("template_hash") + TemplateURIColumn = postgres.StringColumn("template_uri") + EpochLengthColumn = postgres.FloatColumn("epoch_length") + StateColumn = postgres.StringColumn("state") + ReasonColumn = postgres.StringColumn("reason") + LastProcessedBlockColumn = postgres.FloatColumn("last_processed_block") + LastClaimCheckBlockColumn = postgres.FloatColumn("last_claim_check_block") + LastOutputCheckBlockColumn = postgres.FloatColumn("last_output_check_block") + ProcessedInputsColumn = postgres.FloatColumn("processed_inputs") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = 
postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{IDColumn, NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, StateColumn, ReasonColumn, LastProcessedBlockColumn, LastClaimCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, StateColumn, ReasonColumn, LastProcessedBlockColumn, LastClaimCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return applicationTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + Name: NameColumn, + IapplicationAddress: IapplicationAddressColumn, + IconsensusAddress: IconsensusAddressColumn, + TemplateHash: TemplateHashColumn, + TemplateURI: TemplateURIColumn, + EpochLength: EpochLengthColumn, + State: StateColumn, + Reason: ReasonColumn, + LastProcessedBlock: LastProcessedBlockColumn, + LastClaimCheckBlock: LastClaimCheckBlockColumn, + LastOutputCheckBlock: LastOutputCheckBlockColumn, + ProcessedInputs: ProcessedInputsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/epoch.go b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go new file mode 100644 index 000000000..8fd8cc12c --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Epoch = newEpochTable("public", "epoch", "") + +type epochTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + Index postgres.ColumnFloat + FirstBlock postgres.ColumnFloat + LastBlock postgres.ColumnFloat + ClaimHash postgres.ColumnString + ClaimTransactionHash postgres.ColumnString + Status postgres.ColumnString + VirtualIndex postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type EpochTable struct { + epochTable + + EXCLUDED epochTable +} + +// AS creates new EpochTable with assigned alias +func (a EpochTable) AS(alias string) *EpochTable { + return newEpochTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new EpochTable with assigned schema name +func (a EpochTable) FromSchema(schemaName string) *EpochTable { + return newEpochTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new EpochTable with assigned table prefix +func (a EpochTable) WithPrefix(prefix string) *EpochTable { + return newEpochTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new EpochTable with assigned table suffix +func (a EpochTable) WithSuffix(suffix string) *EpochTable { + return newEpochTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newEpochTable(schemaName, tableName, alias string) *EpochTable { + return &EpochTable{ + epochTable: newEpochTableImpl(schemaName, tableName, alias), + EXCLUDED: newEpochTableImpl("", "excluded", ""), + } +} + +func newEpochTableImpl(schemaName, tableName, alias string) epochTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + IndexColumn = postgres.FloatColumn("index") + 
FirstBlockColumn = postgres.FloatColumn("first_block") + LastBlockColumn = postgres.FloatColumn("last_block") + ClaimHashColumn = postgres.StringColumn("claim_hash") + ClaimTransactionHashColumn = postgres.StringColumn("claim_transaction_hash") + StatusColumn = postgres.StringColumn("status") + VirtualIndexColumn = postgres.FloatColumn("virtual_index") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, IndexColumn, FirstBlockColumn, LastBlockColumn, ClaimHashColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{FirstBlockColumn, LastBlockColumn, ClaimHashColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return epochTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + Index: IndexColumn, + FirstBlock: FirstBlockColumn, + LastBlock: LastBlockColumn, + ClaimHash: ClaimHashColumn, + ClaimTransactionHash: ClaimTransactionHashColumn, + Status: StatusColumn, + VirtualIndex: VirtualIndexColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/execution_parameters.go b/internal/repository/postgres/db/rollupsdb/public/table/execution_parameters.go new file mode 100644 index 000000000..751f5c9f4 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/execution_parameters.go @@ -0,0 +1,123 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var ExecutionParameters = newExecutionParametersTable("public", "execution_parameters", "") + +type executionParametersTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + SnapshotPolicy postgres.ColumnString + SnapshotRetention postgres.ColumnInteger + AdvanceIncCycles postgres.ColumnInteger + AdvanceMaxCycles postgres.ColumnInteger + InspectIncCycles postgres.ColumnInteger + InspectMaxCycles postgres.ColumnInteger + AdvanceIncDeadline postgres.ColumnInteger + AdvanceMaxDeadline postgres.ColumnInteger + InspectIncDeadline postgres.ColumnInteger + InspectMaxDeadline postgres.ColumnInteger + LoadDeadline postgres.ColumnInteger + StoreDeadline postgres.ColumnInteger + FastDeadline postgres.ColumnInteger + MaxConcurrentInspects postgres.ColumnInteger + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type ExecutionParametersTable struct { + executionParametersTable + + EXCLUDED executionParametersTable +} + +// AS creates new ExecutionParametersTable with assigned alias +func (a ExecutionParametersTable) AS(alias string) *ExecutionParametersTable { + return newExecutionParametersTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new ExecutionParametersTable with assigned schema name +func (a ExecutionParametersTable) FromSchema(schemaName string) *ExecutionParametersTable { + return newExecutionParametersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new ExecutionParametersTable with assigned table prefix +func (a ExecutionParametersTable) WithPrefix(prefix string) *ExecutionParametersTable { + return newExecutionParametersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix 
creates new ExecutionParametersTable with assigned table suffix +func (a ExecutionParametersTable) WithSuffix(suffix string) *ExecutionParametersTable { + return newExecutionParametersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newExecutionParametersTable(schemaName, tableName, alias string) *ExecutionParametersTable { + return &ExecutionParametersTable{ + executionParametersTable: newExecutionParametersTableImpl(schemaName, tableName, alias), + EXCLUDED: newExecutionParametersTableImpl("", "excluded", ""), + } +} + +func newExecutionParametersTableImpl(schemaName, tableName, alias string) executionParametersTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + SnapshotPolicyColumn = postgres.StringColumn("snapshot_policy") + SnapshotRetentionColumn = postgres.IntegerColumn("snapshot_retention") + AdvanceIncCyclesColumn = postgres.IntegerColumn("advance_inc_cycles") + AdvanceMaxCyclesColumn = postgres.IntegerColumn("advance_max_cycles") + InspectIncCyclesColumn = postgres.IntegerColumn("inspect_inc_cycles") + InspectMaxCyclesColumn = postgres.IntegerColumn("inspect_max_cycles") + AdvanceIncDeadlineColumn = postgres.IntegerColumn("advance_inc_deadline") + AdvanceMaxDeadlineColumn = postgres.IntegerColumn("advance_max_deadline") + InspectIncDeadlineColumn = postgres.IntegerColumn("inspect_inc_deadline") + InspectMaxDeadlineColumn = postgres.IntegerColumn("inspect_max_deadline") + LoadDeadlineColumn = postgres.IntegerColumn("load_deadline") + StoreDeadlineColumn = postgres.IntegerColumn("store_deadline") + FastDeadlineColumn = postgres.IntegerColumn("fast_deadline") + MaxConcurrentInspectsColumn = postgres.IntegerColumn("max_concurrent_inspects") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, SnapshotPolicyColumn, SnapshotRetentionColumn, AdvanceIncCyclesColumn, AdvanceMaxCyclesColumn, 
InspectIncCyclesColumn, InspectMaxCyclesColumn, AdvanceIncDeadlineColumn, AdvanceMaxDeadlineColumn, InspectIncDeadlineColumn, InspectMaxDeadlineColumn, LoadDeadlineColumn, StoreDeadlineColumn, FastDeadlineColumn, MaxConcurrentInspectsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{SnapshotPolicyColumn, SnapshotRetentionColumn, AdvanceIncCyclesColumn, AdvanceMaxCyclesColumn, InspectIncCyclesColumn, InspectMaxCyclesColumn, AdvanceIncDeadlineColumn, AdvanceMaxDeadlineColumn, InspectIncDeadlineColumn, InspectMaxDeadlineColumn, LoadDeadlineColumn, StoreDeadlineColumn, FastDeadlineColumn, MaxConcurrentInspectsColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return executionParametersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + SnapshotPolicy: SnapshotPolicyColumn, + SnapshotRetention: SnapshotRetentionColumn, + AdvanceIncCycles: AdvanceIncCyclesColumn, + AdvanceMaxCycles: AdvanceMaxCyclesColumn, + InspectIncCycles: InspectIncCyclesColumn, + InspectMaxCycles: InspectMaxCyclesColumn, + AdvanceIncDeadline: AdvanceIncDeadlineColumn, + AdvanceMaxDeadline: AdvanceMaxDeadlineColumn, + InspectIncDeadline: InspectIncDeadlineColumn, + InspectMaxDeadline: InspectMaxDeadlineColumn, + LoadDeadline: LoadDeadlineColumn, + StoreDeadline: StoreDeadlineColumn, + FastDeadline: FastDeadlineColumn, + MaxConcurrentInspects: MaxConcurrentInspectsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/input.go b/internal/repository/postgres/db/rollupsdb/public/table/input.go new file mode 100644 index 000000000..bcc74d676 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/input.go @@ -0,0 +1,108 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Input = newInputTable("public", "input", "") + +type inputTable struct { + postgres.Table + + // Columns + EpochApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + Index postgres.ColumnFloat + BlockNumber postgres.ColumnFloat + RawData postgres.ColumnString + Status postgres.ColumnString + MachineHash postgres.ColumnString + OutputsHash postgres.ColumnString + TransactionReference postgres.ColumnString + SnapshotURI postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type InputTable struct { + inputTable + + EXCLUDED inputTable +} + +// AS creates new InputTable with assigned alias +func (a InputTable) AS(alias string) *InputTable { + return newInputTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new InputTable with assigned schema name +func (a InputTable) FromSchema(schemaName string) *InputTable { + return newInputTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new InputTable with assigned table prefix +func (a InputTable) WithPrefix(prefix string) *InputTable { + return newInputTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new InputTable with assigned table suffix +func (a InputTable) WithSuffix(suffix string) *InputTable { + return newInputTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newInputTable(schemaName, tableName, alias string) *InputTable { + return &InputTable{ + inputTable: newInputTableImpl(schemaName, tableName, alias), + EXCLUDED: newInputTableImpl("", "excluded", ""), + } +} + +func newInputTableImpl(schemaName, tableName, alias string) inputTable { + var ( + EpochApplicationIDColumn = 
postgres.IntegerColumn("epoch_application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + IndexColumn = postgres.FloatColumn("index") + BlockNumberColumn = postgres.FloatColumn("block_number") + RawDataColumn = postgres.StringColumn("raw_data") + StatusColumn = postgres.StringColumn("status") + MachineHashColumn = postgres.StringColumn("machine_hash") + OutputsHashColumn = postgres.StringColumn("outputs_hash") + TransactionReferenceColumn = postgres.StringColumn("transaction_reference") + SnapshotURIColumn = postgres.StringColumn("snapshot_uri") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{EpochApplicationIDColumn, EpochIndexColumn, IndexColumn, BlockNumberColumn, RawDataColumn, StatusColumn, MachineHashColumn, OutputsHashColumn, TransactionReferenceColumn, SnapshotURIColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{EpochIndexColumn, BlockNumberColumn, RawDataColumn, StatusColumn, MachineHashColumn, OutputsHashColumn, TransactionReferenceColumn, SnapshotURIColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return inputTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + EpochApplicationID: EpochApplicationIDColumn, + EpochIndex: EpochIndexColumn, + Index: IndexColumn, + BlockNumber: BlockNumberColumn, + RawData: RawDataColumn, + Status: StatusColumn, + MachineHash: MachineHashColumn, + OutputsHash: OutputsHashColumn, + TransactionReference: TransactionReferenceColumn, + SnapshotURI: SnapshotURIColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/node_config.go b/internal/repository/postgres/db/rollupsdb/public/table/node_config.go new file mode 100644 index 000000000..62aa951ca --- /dev/null +++ 
b/internal/repository/postgres/db/rollupsdb/public/table/node_config.go @@ -0,0 +1,84 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var NodeConfig = newNodeConfigTable("public", "node_config", "") + +type nodeConfigTable struct { + postgres.Table + + // Columns + Key postgres.ColumnString + Value postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type NodeConfigTable struct { + nodeConfigTable + + EXCLUDED nodeConfigTable +} + +// AS creates new NodeConfigTable with assigned alias +func (a NodeConfigTable) AS(alias string) *NodeConfigTable { + return newNodeConfigTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new NodeConfigTable with assigned schema name +func (a NodeConfigTable) FromSchema(schemaName string) *NodeConfigTable { + return newNodeConfigTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new NodeConfigTable with assigned table prefix +func (a NodeConfigTable) WithPrefix(prefix string) *NodeConfigTable { + return newNodeConfigTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new NodeConfigTable with assigned table suffix +func (a NodeConfigTable) WithSuffix(suffix string) *NodeConfigTable { + return newNodeConfigTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newNodeConfigTable(schemaName, tableName, alias string) *NodeConfigTable { + return &NodeConfigTable{ + nodeConfigTable: newNodeConfigTableImpl(schemaName, tableName, alias), + EXCLUDED: newNodeConfigTableImpl("", "excluded", ""), + } +} + +func newNodeConfigTableImpl(schemaName, tableName, alias string) nodeConfigTable { + var ( + KeyColumn = postgres.StringColumn("key") + ValueColumn 
= postgres.StringColumn("value") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{KeyColumn, ValueColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{ValueColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return nodeConfigTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + Key: KeyColumn, + Value: ValueColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/output.go b/internal/repository/postgres/db/rollupsdb/public/table/output.go new file mode 100644 index 000000000..bc916600d --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/output.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Output = newOutputTable("public", "output", "") + +type outputTable struct { + postgres.Table + + // Columns + InputEpochApplicationID postgres.ColumnInteger + InputIndex postgres.ColumnFloat + Index postgres.ColumnFloat + RawData postgres.ColumnString + Hash postgres.ColumnString + OutputHashesSiblings postgres.ColumnString + ExecutionTransactionHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type OutputTable struct { + outputTable + + EXCLUDED outputTable +} + +// AS creates new OutputTable with assigned alias +func (a OutputTable) AS(alias string) *OutputTable { + return newOutputTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new OutputTable with assigned schema name +func (a 
OutputTable) FromSchema(schemaName string) *OutputTable { + return newOutputTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new OutputTable with assigned table prefix +func (a OutputTable) WithPrefix(prefix string) *OutputTable { + return newOutputTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new OutputTable with assigned table suffix +func (a OutputTable) WithSuffix(suffix string) *OutputTable { + return newOutputTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newOutputTable(schemaName, tableName, alias string) *OutputTable { + return &OutputTable{ + outputTable: newOutputTableImpl(schemaName, tableName, alias), + EXCLUDED: newOutputTableImpl("", "excluded", ""), + } +} + +func newOutputTableImpl(schemaName, tableName, alias string) outputTable { + var ( + InputEpochApplicationIDColumn = postgres.IntegerColumn("input_epoch_application_id") + InputIndexColumn = postgres.FloatColumn("input_index") + IndexColumn = postgres.FloatColumn("index") + RawDataColumn = postgres.StringColumn("raw_data") + HashColumn = postgres.StringColumn("hash") + OutputHashesSiblingsColumn = postgres.StringColumn("output_hashes_siblings") + ExecutionTransactionHashColumn = postgres.StringColumn("execution_transaction_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{InputEpochApplicationIDColumn, InputIndexColumn, IndexColumn, RawDataColumn, HashColumn, OutputHashesSiblingsColumn, ExecutionTransactionHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{InputIndexColumn, RawDataColumn, HashColumn, OutputHashesSiblingsColumn, ExecutionTransactionHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return outputTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + InputEpochApplicationID: InputEpochApplicationIDColumn, + 
InputIndex: InputIndexColumn, + Index: IndexColumn, + RawData: RawDataColumn, + Hash: HashColumn, + OutputHashesSiblings: OutputHashesSiblingsColumn, + ExecutionTransactionHash: ExecutionTransactionHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/report.go b/internal/repository/postgres/db/rollupsdb/public/table/report.go new file mode 100644 index 000000000..c9db4517e --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/report.go @@ -0,0 +1,90 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Report = newReportTable("public", "report", "") + +type reportTable struct { + postgres.Table + + // Columns + InputEpochApplicationID postgres.ColumnInteger + InputIndex postgres.ColumnFloat + Index postgres.ColumnFloat + RawData postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type ReportTable struct { + reportTable + + EXCLUDED reportTable +} + +// AS creates new ReportTable with assigned alias +func (a ReportTable) AS(alias string) *ReportTable { + return newReportTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new ReportTable with assigned schema name +func (a ReportTable) FromSchema(schemaName string) *ReportTable { + return newReportTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new ReportTable with assigned table prefix +func (a ReportTable) WithPrefix(prefix string) *ReportTable { + return newReportTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new ReportTable with assigned table suffix 
+func (a ReportTable) WithSuffix(suffix string) *ReportTable { + return newReportTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newReportTable(schemaName, tableName, alias string) *ReportTable { + return &ReportTable{ + reportTable: newReportTableImpl(schemaName, tableName, alias), + EXCLUDED: newReportTableImpl("", "excluded", ""), + } +} + +func newReportTableImpl(schemaName, tableName, alias string) reportTable { + var ( + InputEpochApplicationIDColumn = postgres.IntegerColumn("input_epoch_application_id") + InputIndexColumn = postgres.FloatColumn("input_index") + IndexColumn = postgres.FloatColumn("index") + RawDataColumn = postgres.StringColumn("raw_data") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{InputEpochApplicationIDColumn, InputIndexColumn, IndexColumn, RawDataColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{InputIndexColumn, RawDataColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return reportTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + InputEpochApplicationID: InputEpochApplicationIDColumn, + InputIndex: InputIndexColumn, + Index: IndexColumn, + RawData: RawDataColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/schema_migrations.go b/internal/repository/postgres/db/rollupsdb/public/table/schema_migrations.go new file mode 100644 index 000000000..ebafd2e28 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/schema_migrations.go @@ -0,0 +1,78 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var SchemaMigrations = newSchemaMigrationsTable("public", "schema_migrations", "") + +type schemaMigrationsTable struct { + postgres.Table + + // Columns + Version postgres.ColumnInteger + Dirty postgres.ColumnBool + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type SchemaMigrationsTable struct { + schemaMigrationsTable + + EXCLUDED schemaMigrationsTable +} + +// AS creates new SchemaMigrationsTable with assigned alias +func (a SchemaMigrationsTable) AS(alias string) *SchemaMigrationsTable { + return newSchemaMigrationsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new SchemaMigrationsTable with assigned schema name +func (a SchemaMigrationsTable) FromSchema(schemaName string) *SchemaMigrationsTable { + return newSchemaMigrationsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new SchemaMigrationsTable with assigned table prefix +func (a SchemaMigrationsTable) WithPrefix(prefix string) *SchemaMigrationsTable { + return newSchemaMigrationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new SchemaMigrationsTable with assigned table suffix +func (a SchemaMigrationsTable) WithSuffix(suffix string) *SchemaMigrationsTable { + return newSchemaMigrationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newSchemaMigrationsTable(schemaName, tableName, alias string) *SchemaMigrationsTable { + return &SchemaMigrationsTable{ + schemaMigrationsTable: newSchemaMigrationsTableImpl(schemaName, tableName, alias), + EXCLUDED: newSchemaMigrationsTableImpl("", "excluded", ""), + } +} + +func newSchemaMigrationsTableImpl(schemaName, tableName, alias string) schemaMigrationsTable { + var ( + VersionColumn = postgres.IntegerColumn("version") + DirtyColumn = 
postgres.BoolColumn("dirty") + allColumns = postgres.ColumnList{VersionColumn, DirtyColumn} + mutableColumns = postgres.ColumnList{DirtyColumn} + ) + + return schemaMigrationsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + Version: VersionColumn, + Dirty: DirtyColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go new file mode 100644 index 000000000..528eff35b --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go @@ -0,0 +1,21 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. 
+func UseSchema(schema string) { + Application = Application.FromSchema(schema) + Epoch = Epoch.FromSchema(schema) + ExecutionParameters = ExecutionParameters.FromSchema(schema) + Input = Input.FromSchema(schema) + NodeConfig = NodeConfig.FromSchema(schema) + Output = Output.FromSchema(schema) + Report = Report.FromSchema(schema) + SchemaMigrations = SchemaMigrations.FromSchema(schema) +} diff --git a/internal/repository/postgres/epoch.go b/internal/repository/postgres/epoch.go new file mode 100644 index 000000000..3e259b15f --- /dev/null +++ b/internal/repository/postgres/epoch.go @@ -0,0 +1,582 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + "sort" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" + "github.com/go-jet/jet/v2/postgres" + "github.com/jackc/pgx/v5" +) + +func (r *postgresRepository) CreateEpoch( + ctx context.Context, + nameOrAddress string, + e *model.Epoch, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := postgres.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", e.Index)), + postgres.RawFloat(fmt.Sprintf("%d", e.FirstBlock)), + postgres.RawFloat(fmt.Sprintf("%d", e.LastBlock)), + postgres.Bytea(e.ClaimHash), + postgres.Bytea(e.ClaimTransactionHash), + postgres.NewEnumValue(e.Status.String()), + postgres.RawFloat(fmt.Sprintf("%d", e.VirtualIndex)), + ).FROM( + table.Application, + ).WHERE( + whereClause, + ) + + insertStmt := table.Epoch.INSERT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, 
+ ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + return err +} + +func getEpochNextVirtualIndex( + ctx context.Context, + tx pgx.Tx, + nameOrAddress string, +) (uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return 0, err + } + + query := table.Epoch.SELECT( + postgres.COALESCE( + postgres.Float(1).ADD(postgres.MAXf(table.Epoch.VirtualIndex)), + postgres.Float(0), + ), + ).FROM( + table.Epoch.INNER_JOIN(table.Application, table.Epoch.ApplicationID.EQ(table.Application.ID)), + ).WHERE( + whereClause, + ) + + queryStr, args := query.Sql() + var currentIndex uint64 + err = tx.QueryRow(ctx, queryStr, args...).Scan(¤tIndex) + if err != nil { + err = fmt.Errorf("failed to get the next epoch virtual index: %w", err) + return 0, errors.Join(err, tx.Rollback(ctx)) + } + return currentIndex, nil +} + +func orderEpochs(epochInputsMap map[*model.Epoch][]*model.Input) []*model.Epoch { + epochs := make([]*model.Epoch, 0, len(epochInputsMap)) + for e := range epochInputsMap { + epochs = append(epochs, e) + } + + sort.Slice(epochs, func(i, j int) bool { + return epochs[i].FirstBlock < epochs[j].FirstBlock + }) + + return epochs +} + +func (r *postgresRepository) CreateEpochsAndInputs( + ctx context.Context, + nameOrAddress string, + epochInputsMap map[*model.Epoch][]*model.Input, + blockNumber uint64, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + epochInsertStmt := table.Epoch.INSERT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.Status, + table.Epoch.VirtualIndex, + ) + + inputInsertStmt := table.Input. 
+ INSERT( + table.Input.EpochApplicationID, + table.Input.EpochIndex, + table.Input.Index, + table.Input.BlockNumber, + table.Input.RawData, + table.Input.Status, + table.Input.TransactionReference, + ) + + tx, err := r.db.Begin(ctx) + if err != nil { + return err + } + + epochs := orderEpochs(epochInputsMap) + for _, epoch := range epochs { + inputs := epochInputsMap[epoch] + + nextVirtualIndex, err := getEpochNextVirtualIndex(ctx, tx, nameOrAddress) + if err != nil { + return err + } + + epochSelectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", epoch.Index)), + postgres.RawFloat(fmt.Sprintf("%d", epoch.FirstBlock)), + postgres.RawFloat(fmt.Sprintf("%d", epoch.LastBlock)), + postgres.NewEnumValue(epoch.Status.String()), + postgres.RawFloat(fmt.Sprintf("%d", nextVirtualIndex)), + ).WHERE( + whereClause, + ) + + sqlStr, args := epochInsertStmt.QUERY(epochSelectQuery). + ON_CONFLICT(table.Epoch.ApplicationID, table.Epoch.Index). + DO_UPDATE(postgres.SET( + table.Epoch.Status.SET(postgres.NewEnumValue(epoch.Status.String())), + )).Sql() // FIXME on conflict + _, err = tx.Exec(ctx, sqlStr, args...) + + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + for _, input := range inputs { + inputSelectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", epoch.Index)), + postgres.RawFloat(fmt.Sprintf("%d", input.Index)), + postgres.RawFloat(fmt.Sprintf("%d", input.BlockNumber)), + postgres.Bytea(input.RawData), + postgres.NewEnumValue(input.Status.String()), + postgres.Bytea(input.TransactionReference.Bytes()), + ).WHERE( + whereClause, + ) + + sqlStr, args := inputInsertStmt.QUERY(inputSelectQuery).Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + } + } + + // Update last processed block + appUpdateStmt := table.Application. + UPDATE( + table.Application.LastProcessedBlock, + ). 
+ SET( + postgres.RawFloat(fmt.Sprintf("%d", blockNumber)), + ). + WHERE(whereClause) + + sqlStr, args := appUpdateStmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + // Commit transaction + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + return nil +} + +func (r *postgresRepository) GetEpoch( + ctx context.Context, + nameOrAddress string, + index uint64, +) (*model.Epoch, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + stmt := table.Epoch. + SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + ). + FROM( + table.Epoch. + INNER_JOIN(table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", index)))), + ) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var ep model.Epoch + err = row.Scan( + &ep.ApplicationID, + &ep.Index, + &ep.FirstBlock, + &ep.LastBlock, + &ep.ClaimHash, + &ep.ClaimTransactionHash, + &ep.Status, + &ep.VirtualIndex, + &ep.CreatedAt, + &ep.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &ep, nil +} + +func (r *postgresRepository) GetEpochByVirtualIndex( + ctx context.Context, + nameOrAddress string, + index uint64, +) (*model.Epoch, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + stmt := table.Epoch. 
+ SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + ). + FROM( + table.Epoch. + INNER_JOIN(table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Epoch.VirtualIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", index)))), + ) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var ep model.Epoch + err = row.Scan( + &ep.ApplicationID, + &ep.Index, + &ep.FirstBlock, + &ep.LastBlock, + &ep.ClaimHash, + &ep.ClaimTransactionHash, + &ep.Status, + &ep.VirtualIndex, + &ep.CreatedAt, + &ep.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &ep, nil +} + +func (r *postgresRepository) UpdateEpoch( + ctx context.Context, + nameOrAddress string, + e *model.Epoch, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + updStmt := table.Epoch. + UPDATE( + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + ). + SET( + e.ClaimHash, + e.ClaimTransactionHash, + e.Status, + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Epoch.ApplicationID.EQ(table.Application.ID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", e.Index)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *postgresRepository) UpdateEpochsClaimAccepted( + ctx context.Context, + nameOrAddress string, + epochs []*model.Epoch, + lastClaimCheckBlock uint64, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + tx, err := r.db.Begin(ctx) + if err != nil { + return err + } + + for _, e := range epochs { + if e.Status != model.EpochStatus_ClaimAccepted { + return errors.Join( + fmt.Errorf("epoch status must be ClaimAccepted when updating app %s epoch %d", nameOrAddress, e.Index), + tx.Rollback(ctx), + ) + } + updStmt := table.Epoch. + UPDATE( + table.Epoch.Status, + ). + SET( + e.Status, + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Epoch.ApplicationID.EQ(table.Application.ID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", e.Index)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + if cmd.RowsAffected() != 1 { + return errors.Join( + fmt.Errorf("no row affected when updating app %s epoch %d", nameOrAddress, e.Index), + tx.Rollback(ctx), + ) + } + } + + // Update last claim check block + appUpdateStmt := table.Application. + UPDATE( + table.Application.LastClaimCheckBlock, + ). + SET( + postgres.RawFloat(fmt.Sprintf("%d", lastClaimCheckBlock)), + ). + WHERE(whereClause) + + sqlStr, args := appUpdateStmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + // Commit transaction + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + return nil +} + +func (r *postgresRepository) UpdateEpochsInputsProcessed( + ctx context.Context, + nameOrAddress string, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + subSelect := table.Input.SELECT(postgres.Raw("1")). + WHERE( + table.Input.EpochApplicationID.EQ(table.Epoch.ApplicationID). + AND(table.Input.EpochIndex.EQ(table.Epoch.Index)). + AND(table.Input.Status.EQ(postgres.NewEnumValue(model.InputCompletionStatus_None.String()))), + ) + + notExistsClause := postgres.NOT( + postgres.EXISTS(subSelect), + ) + + updateStmt := table.Epoch.UPDATE(table.Epoch.Status). + SET(postgres.NewEnumValue(model.EpochStatus_InputsProcessed.String())). + FROM(table.Application). + WHERE( + table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_Closed.String())). + AND(table.Epoch.ApplicationID.EQ(table.Application.ID)). + AND(whereClause). + AND(notExistsClause), + ) + + sqlStr, args := updateStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + + return nil +} + +func (r *postgresRepository) ListEpochs( + ctx context.Context, + nameOrAddress string, + f repository.EpochFilter, + p repository.Pagination, +) ([]*model.Epoch, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Epoch. + SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.ClaimHash, + table.Epoch.ClaimTransactionHash, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + ). + FROM( + table.Epoch. 
+ INNER_JOIN(table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.Status != nil { + conditions = append(conditions, table.Epoch.Status.EQ(postgres.NewEnumValue(f.Status.String()))) + } + + if f.BeforeBlock != nil { + conditions = append(conditions, table.Epoch.LastBlock.LT(postgres.RawFloat(fmt.Sprintf("%d", *f.BeforeBlock)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)).ORDER_BY(table.Epoch.Index.ASC()) + + // pagination + if p.Limit > 0 { + sel = sel.LIMIT(p.Limit) + } + if p.Offset > 0 { + sel = sel.OFFSET(p.Offset) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + var epochs []*model.Epoch + for rows.Next() { + var ep model.Epoch + err := rows.Scan( + &ep.ApplicationID, + &ep.Index, + &ep.FirstBlock, + &ep.LastBlock, + &ep.ClaimHash, + &ep.ClaimTransactionHash, + &ep.Status, + &ep.VirtualIndex, + &ep.CreatedAt, + &ep.UpdatedAt, + ) + if err != nil { + return nil, err + } + epochs = append(epochs, &ep) + } + return epochs, nil +} diff --git a/internal/repository/postgres/input.go b/internal/repository/postgres/input.go new file mode 100644 index 000000000..8a396295c --- /dev/null +++ b/internal/repository/postgres/input.go @@ -0,0 +1,294 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "fmt" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" +) + +func (r *postgresRepository) GetInput( + ctx context.Context, + nameOrAddress string, + inputIndex uint64, +) (*model.Input, error) { + + whereClause, err := 
getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Input. + SELECT( + table.Input.EpochApplicationID, + table.Input.EpochIndex, + table.Input.Index, + table.Input.BlockNumber, + table.Input.RawData, + table.Input.Status, + table.Input.MachineHash, + table.Input.OutputsHash, + table.Input.TransactionReference, + table.Input.CreatedAt, + table.Input.UpdatedAt, + ). + FROM( + table.Input. + INNER_JOIN(table.Application, + table.Input.EpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var inp model.Input + err = row.Scan( + &inp.EpochApplicationID, + &inp.EpochIndex, + &inp.Index, + &inp.BlockNumber, + &inp.RawData, + &inp.Status, + &inp.MachineHash, + &inp.OutputsHash, + &inp.TransactionReference, + &inp.CreatedAt, + &inp.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return &inp, nil +} + +func (r *postgresRepository) GetInputByTxReference( + ctx context.Context, + nameOrAddress string, + ref *common.Hash, +) (*model.Input, error) { + + if ref == nil { + return nil, fmt.Errorf("tx reference is nil") + } + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Input. + SELECT( + table.Input.EpochApplicationID, + table.Input.EpochIndex, + table.Input.Index, + table.Input.BlockNumber, + table.Input.RawData, + table.Input.Status, + table.Input.MachineHash, + table.Input.OutputsHash, + table.Input.TransactionReference, + table.Input.CreatedAt, + table.Input.UpdatedAt, + ). + FROM( + table.Input. + INNER_JOIN(table.Application, + table.Input.EpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. 
+ AND(table.Input.TransactionReference.EQ(postgres.Bytea(ref))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var inp model.Input + err = row.Scan( + &inp.EpochApplicationID, + &inp.EpochIndex, + &inp.Index, + &inp.BlockNumber, + &inp.RawData, + &inp.Status, + &inp.MachineHash, + &inp.OutputsHash, + &inp.TransactionReference, + &inp.CreatedAt, + &inp.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return &inp, nil +} + +func (r *postgresRepository) GetLastInput( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, +) (*model.Input, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Input. + SELECT( + table.Input.EpochApplicationID, + table.Input.EpochIndex, + table.Input.Index, + table.Input.BlockNumber, + table.Input.RawData, + table.Input.Status, + table.Input.MachineHash, + table.Input.OutputsHash, + table.Input.TransactionReference, + table.Input.CreatedAt, + table.Input.UpdatedAt, + ). + FROM( + table.Input. + INNER_JOIN(table.Application, + table.Input.EpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Input.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))), + ). + ORDER_BY(table.Input.Index.DESC()). + LIMIT(1) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) 
+ + var inp model.Input + err = row.Scan( + &inp.EpochApplicationID, + &inp.EpochIndex, + &inp.Index, + &inp.BlockNumber, + &inp.RawData, + &inp.Status, + &inp.MachineHash, + &inp.OutputsHash, + &inp.TransactionReference, + &inp.CreatedAt, + &inp.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return &inp, nil +} + +func (r *postgresRepository) ListInputs( + ctx context.Context, + nameOrAddress string, + f repository.InputFilter, + p repository.Pagination, +) ([]*model.Input, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Input. + SELECT( + table.Input.EpochApplicationID, + table.Input.EpochIndex, + table.Input.Index, + table.Input.BlockNumber, + table.Input.RawData, + table.Input.Status, + table.Input.MachineHash, + table.Input.OutputsHash, + table.Input.TransactionReference, + table.Input.CreatedAt, + table.Input.UpdatedAt, + ). + FROM( + table.Input. + INNER_JOIN(table.Application, + table.Input.EpochApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.Status != nil { + conditions = append(conditions, table.Input.Status.EQ(postgres.NewEnumValue(f.Status.String()))) + } + + if f.NotStatus != nil { + conditions = append(conditions, table.Input.Status.NOT_EQ(postgres.NewEnumValue(f.NotStatus.String()))) + } + + if f.InputIndex != nil { + conditions = append(conditions, table.Input.Index.GT_EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.InputIndex)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)).ORDER_BY(table.Input.Index.ASC()) + + if p.Limit > 0 { + sel = sel.LIMIT(p.Limit) + } + if p.Offset > 0 { + sel = sel.OFFSET(p.Offset) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + var inputs []*model.Input + for rows.Next() { + var in model.Input + err := rows.Scan( + &in.EpochApplicationID, + &in.EpochIndex, + &in.Index, + &in.BlockNumber, + &in.RawData, + &in.Status, + &in.MachineHash, + &in.OutputsHash, + &in.TransactionReference, + &in.CreatedAt, + &in.UpdatedAt, + ) + if err != nil { + return nil, err + } + inputs = append(inputs, &in) + } + return inputs, nil +} diff --git a/internal/repository/postgres/node_config.go b/internal/repository/postgres/node_config.go new file mode 100644 index 000000000..c66ff200c --- /dev/null +++ b/internal/repository/postgres/node_config.go @@ -0,0 +1,65 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" + + "github.com/go-jet/jet/v2/postgres" +) + +func (r *postgresRepository) SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error { + + insertStmt := table.NodeConfig. + INSERT( + table.NodeConfig.Key, + table.NodeConfig.Value, + ). + VALUES( + key, + postgres.Json(rawJSON), + ). + ON_CONFLICT(table.NodeConfig.Key). + DO_UPDATE(postgres.SET(table.NodeConfig.Value.SET(postgres.Json(rawJSON)))) + + sqlStr, args := insertStmt.Sql() + _, err := r.db.Exec(ctx, sqlStr, args...) + return err +} + +func (r *postgresRepository) LoadNodeConfigRaw(ctx context.Context, key string) ([]byte, time.Time, time.Time, error) { + sel := table.NodeConfig. + SELECT( + table.NodeConfig.Value, + table.NodeConfig.CreatedAt, + table.NodeConfig.UpdatedAt, + ). + LIMIT(1) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) 
+ + var ( + value []byte + createdAt time.Time + updatedAt time.Time + ) + err := row.Scan( + &value, + &createdAt, + &updatedAt, + ) + if err == sql.ErrNoRows { + return nil, time.Time{}, time.Time{}, fmt.Errorf("no node config found for key=%q", key) + } + if err != nil { + return nil, time.Time{}, time.Time{}, err + } + return value, createdAt, updatedAt, nil +} diff --git a/internal/repository/postgres/output.go b/internal/repository/postgres/output.go new file mode 100644 index 000000000..dfb082c7a --- /dev/null +++ b/internal/repository/postgres/output.go @@ -0,0 +1,250 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +func (r *postgresRepository) GetOutput( + ctx context.Context, + nameOrAddress string, + outputIndex uint64, +) (*model.Output, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Output. + SELECT( + table.Output.InputEpochApplicationID, + table.Output.InputIndex, + table.Output.Index, + table.Output.RawData, + table.Output.Hash, + table.Output.OutputHashesSiblings, + table.Output.ExecutionTransactionHash, + table.Output.CreatedAt, + table.Output.UpdatedAt, + ). + FROM( + table.Output. + INNER_JOIN(table.Application, + table.Output.InputEpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Output.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", outputIndex)))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) 
+ + var o model.Output + err = row.Scan( + &o.InputEpochApplicationID, + &o.InputIndex, + &o.Index, + &o.RawData, + &o.Hash, + &o.OutputHashesSiblings, + &o.ExecutionTransactionHash, + &o.CreatedAt, + &o.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return &o, nil +} + +func (r *postgresRepository) UpdateOutputsExecution( + ctx context.Context, + nameOrAddress string, + outputs []*model.Output, + lastOutputCheckBlock uint64, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + tx, err := r.db.Begin(ctx) + if err != nil { + return err + } + + for _, o := range outputs { + if o.ExecutionTransactionHash == nil { + return errors.Join( + fmt.Errorf("output ExecutionTransactionHash must be not nil when updating app %s output %d", nameOrAddress, o.Index), + tx.Rollback(ctx), + ) + } + updStmt := table.Output. + UPDATE( + table.Output.ExecutionTransactionHash, + ). + SET( + postgres.Bytea(o.ExecutionTransactionHash.Bytes()), + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Output.InputEpochApplicationID.EQ(table.Application.ID)). + AND(table.Output.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", o.Index)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + if cmd.RowsAffected() != 1 { + return errors.Join( + fmt.Errorf("no row affected when updating app %s epoch %d", nameOrAddress, o.Index), + tx.Rollback(ctx), + ) + } + } + + // Update last claim check block + appUpdateStmt := table.Application. + UPDATE( + table.Application.LastOutputCheckBlock, + ). + SET( + postgres.RawFloat(fmt.Sprintf("%d", lastOutputCheckBlock)), + ). + WHERE(whereClause) + + sqlStr, args := appUpdateStmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + // Commit transaction + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + return nil +} + +func (r *postgresRepository) ListOutputs( + ctx context.Context, + nameOrAddress string, + f repository.OutputFilter, + p repository.Pagination, +) ([]*model.Output, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Output. + SELECT( + table.Output.InputEpochApplicationID, + table.Output.InputIndex, + table.Output.Index, + table.Output.RawData, + table.Output.Hash, + table.Output.OutputHashesSiblings, + table.Output.ExecutionTransactionHash, + table.Output.CreatedAt, + table.Output.UpdatedAt, + ). + FROM( + table.Output. + INNER_JOIN( + table.Application, + table.Output.InputEpochApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.BlockRange != nil { + sel = sel. + FROM( // Overwrite FROM clause to include input table + table.Output.INNER_JOIN( + table.Application, + table.Output.InputEpochApplicationID.EQ(table.Application.ID), + ). + INNER_JOIN( + table.Input, + table.Output.InputIndex.EQ(table.Input.Index). 
+ AND(table.Output.InputEpochApplicationID.EQ(table.Input.EpochApplicationID)), + ), + ) + + conditions = append(conditions, table.Input.BlockNumber.BETWEEN( + postgres.RawFloat(fmt.Sprintf("%d", f.BlockRange.Start)), + postgres.RawFloat(fmt.Sprintf("%d", f.BlockRange.End)), + )) + + conditions = append(conditions, table.Input.Status.EQ(postgres.NewEnumValue(model.InputCompletionStatus_Accepted.String()))) + } + + if f.InputIndex != nil { + conditions = append(conditions, table.Output.InputIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.InputIndex)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)).ORDER_BY(table.Output.Index.ASC()) + + if p.Limit > 0 { + sel = sel.LIMIT(p.Limit) + } + if p.Offset > 0 { + sel = sel.OFFSET(p.Offset) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + var outputs []*model.Output + for rows.Next() { + var out model.Output + err := rows.Scan( + &out.InputEpochApplicationID, + &out.InputIndex, + &out.Index, + &out.RawData, + &out.Hash, + &out.OutputHashesSiblings, + &out.ExecutionTransactionHash, + &out.CreatedAt, + &out.UpdatedAt, + ) + if err != nil { + return nil, err + } + outputs = append(outputs, &out) + } + return outputs, nil +} diff --git a/internal/repository/postgres/report.go b/internal/repository/postgres/report.go new file mode 100644 index 000000000..245f31885 --- /dev/null +++ b/internal/repository/postgres/report.go @@ -0,0 +1,136 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "fmt" + + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +func (r *postgresRepository) GetReport( + ctx context.Context, + 
nameOrAddress string, + reportIndex uint64, +) (*model.Report, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Report. + SELECT( + table.Report.InputEpochApplicationID, + table.Report.InputIndex, + table.Report.Index, + table.Report.RawData, + table.Report.CreatedAt, + table.Report.UpdatedAt, + ). + FROM( + table.Report. + INNER_JOIN(table.Application, + table.Report.InputEpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Report.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", reportIndex)))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var rp model.Report + err = row.Scan( + &rp.InputEpochApplicationID, + &rp.InputIndex, + &rp.Index, + &rp.RawData, + &rp.CreatedAt, + &rp.UpdatedAt, + ) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + return &rp, nil +} + +func (r *postgresRepository) ListReports( + ctx context.Context, + nameOrAddress string, + f repository.ReportFilter, + p repository.Pagination, +) ([]*model.Report, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + sel := table.Report. + SELECT( + table.Report.InputEpochApplicationID, + table.Report.InputIndex, + table.Report.Index, + table.Report.RawData, + table.Report.CreatedAt, + table.Report.UpdatedAt, + ). + FROM( + table.Report. 
+ INNER_JOIN(table.Application, + table.Report.InputEpochApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.InputIndex != nil { + conditions = append(conditions, table.Report.InputIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.InputIndex)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)).ORDER_BY(table.Report.Index.ASC()) + + if p.Limit > 0 { + sel = sel.LIMIT(p.Limit) + } + if p.Offset > 0 { + sel = sel.OFFSET(p.Offset) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + var reports []*model.Report + for rows.Next() { + var rp model.Report + err := rows.Scan( + &rp.InputEpochApplicationID, + &rp.InputIndex, + &rp.Index, + &rp.RawData, + &rp.CreatedAt, + &rp.UpdatedAt, + ) + if err != nil { + return nil, err + } + reports = append(reports, &rp) + } + return reports, nil +} diff --git a/internal/repository/postgres/repository.go b/internal/repository/postgres/repository.go new file mode 100644 index 000000000..9ca83f772 --- /dev/null +++ b/internal/repository/postgres/repository.go @@ -0,0 +1,60 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "fmt" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/schema" +) + +// postgresRepository is the concrete type that implements the repository.Repository interface. 
+type postgresRepository struct { + db *pgxpool.Pool +} + +func validateSchema(pool *pgxpool.Pool) error { + + s, err := schema.NewWithPool(pool) + if err != nil { + return err + } + defer s.Close() + + _, err = s.ValidateVersion() + return err +} + +func NewPostgresRepository(ctx context.Context, conn string, maxRetries int, delay time.Duration) (repository.Repository, error) { + + config, err := pgxpool.ParseConfig(conn) + if err != nil { + return nil, fmt.Errorf("failed to parse Postgres connection string: %w", err) + } + pool, err := pgxpool.NewWithConfig(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to create Postgres pool: %w", err) + } + + for i := 0; i < maxRetries; i++ { + if err := pool.Ping(ctx); err == nil { + err = validateSchema(pool) + if err != nil { + return nil, fmt.Errorf("failed to validate Postgres schema version: %w", err) + } + + return &postgresRepository{db: pool}, nil + } + time.Sleep(delay) + } + + pool.Close() + return nil, fmt.Errorf("failed to ping Postgres after %d retries", maxRetries) + +} diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql new file mode 100644 index 000000000..9b1472f19 --- /dev/null +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql @@ -0,0 +1,42 @@ +-- (c) Cartesi and individual authors (see AUTHORS) +-- SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +DROP TRIGGER IF EXISTS "node_config_set_updated_at" ON "node_config"; +DROP TABLE IF EXISTS "node_config"; + +DROP TRIGGER IF EXISTS "report_set_updated_at" ON "report"; +DROP TABLE IF EXISTS "report"; + +DROP TRIGGER IF EXISTS "output_set_updated_at" ON "output"; +DROP INDEX IF EXISTS "output_raw_data_address_idx"; +DROP INDEX IF EXISTS "output_raw_data_type_idx"; +DROP TABLE IF EXISTS "output"; + +DROP TRIGGER IF EXISTS "input_set_updated_at" ON "input"; +DROP INDEX IF 
EXISTS "input_sender_idx"; +DROP INDEX IF EXISTS "input_status_idx"; +DROP INDEX IF EXISTS "input_block_number_idx"; +DROP TABLE IF EXISTS "input"; + +DROP TRIGGER IF EXISTS "epoch_set_updated_at" ON "epoch"; +DROP INDEX IF EXISTS "epoch_status_idx"; +DROP INDEX IF EXISTS "epoch_last_block_idx"; +DROP TABLE IF EXISTS "epoch"; + +DROP TRIGGER IF EXISTS "execution_parameters_set_updated_at" ON "execution_parameters"; +DROP TABLE IF EXISTS "execution_parameters"; + +DROP TRIGGER IF EXISTS "application_set_updated_at" ON "application"; +DROP TABLE IF EXISTS "application"; + +DROP FUNCTION IF EXISTS "update_updated_at_column"; +DROP FUNCTION IF EXISTS "check_hash_siblings"; + +DROP TYPE IF EXISTS "SnapshotPolicy"; +DROP TYPE IF EXISTS "EpochStatus"; +DROP TYPE IF EXISTS "DefaultBlock"; +DROP TYPE IF EXISTS "InputCompletionStatus"; +DROP TYPE IF EXISTS "ApplicationState"; +DROP DOMAIN IF EXISTS "hash"; +DROP DOMAIN IF EXISTS "uint64"; +DROP DOMAIN IF EXISTS "ethereum_address"; diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql new file mode 100644 index 000000000..a4f0d3d72 --- /dev/null +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql @@ -0,0 +1,213 @@ +-- (c) Cartesi and individual authors (see AUTHORS) +-- SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +CREATE DOMAIN "ethereum_address" AS BYTEA CHECK (octet_length(VALUE) = 20); +CREATE DOMAIN "uint64" AS NUMERIC(20, 0) CHECK (VALUE >= 0 AND VALUE <= 18446744073709551615); +CREATE DOMAIN "hash" AS BYTEA CHECK (octet_length(VALUE) = 32); + +CREATE TYPE "ApplicationState" AS ENUM ('ENABLED', 'DISABLED', 'INOPERABLE'); + +CREATE TYPE "InputCompletionStatus" AS ENUM ( + 'NONE', + 'ACCEPTED', + 'REJECTED', + 'EXCEPTION', + 'MACHINE_HALTED', + 'OUTPUTS_LIMIT_EXCEEDED', + 'CYCLE_LIMIT_EXCEEDED', + 'TIME_LIMIT_EXCEEDED', + 
'PAYLOAD_LENGTH_LIMIT_EXCEEDED'); + +CREATE TYPE "DefaultBlock" AS ENUM ('FINALIZED', 'LATEST', 'PENDING', 'SAFE'); + +CREATE TYPE "EpochStatus" AS ENUM ( + 'OPEN', + 'CLOSED', + 'INPUTS_PROCESSED', + 'CLAIM_COMPUTED', + 'CLAIM_SUBMITTED', + 'CLAIM_ACCEPTED', + 'CLAIM_REJECTED'); + +CREATE TYPE "SnapshotPolicy" AS ENUM ('NONE', 'EACH_INPUT', 'EACH_EPOCH'); + +CREATE FUNCTION "update_updated_at_column"() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION check_hash_siblings(arr BYTEA[]) +RETURNS BOOLEAN AS $$ +DECLARE + elem BYTEA; +BEGIN + IF arr IS NULL THEN + RETURN TRUE; -- NULL array is allowed + END IF; + + FOREACH elem IN ARRAY arr + LOOP + IF octet_length(elem) <> 32 THEN + RETURN FALSE; -- any element not 32 bytes => fail + END IF; + END LOOP; + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql IMMUTABLE; + +CREATE TABLE "application" +( + "id" SERIAL, + "name" VARCHAR(4096) UNIQUE NOT NULL CHECK ("name" ~ '^[a-z0-9_-]+$'), + "iapplication_address" ethereum_address UNIQUE NOT NULL, + "iconsensus_address" ethereum_address NOT NULL, + "template_hash" hash NOT NULL, + "template_uri" VARCHAR(4096) NOT NULL, + "epoch_length" uint64 NOT NULL, + "state" "ApplicationState" NOT NULL, + "reason" VARCHAR(4096), + "last_processed_block" uint64 NOT NULL, + "last_claim_check_block" uint64 NOT NULL, + "last_output_check_block" uint64 NOT NULL, + "processed_inputs" uint64 NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "reason_required_for_inoperable" CHECK (NOT ("state" = 'INOPERABLE' AND ("reason" IS NULL OR LENGTH("reason") = 0))), + CONSTRAINT "application_pkey" PRIMARY KEY ("id") +); + +CREATE TRIGGER "application_set_updated_at" BEFORE UPDATE ON "application" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "execution_parameters" ( + "application_id" INT PRIMARY KEY, + "snapshot_policy" 
"SnapshotPolicy" NOT NULL DEFAULT 'NONE', + "snapshot_retention" BIGINT NOT NULL CHECK ("snapshot_retention" >= 0) DEFAULT 0, -- 0 means no retention + "advance_inc_cycles" BIGINT NOT NULL CHECK ("advance_inc_cycles" > 0) DEFAULT 4194304, -- 1 << 22 + "advance_max_cycles" BIGINT NOT NULL CHECK ("advance_max_cycles" > 0) DEFAULT 4611686018427387903, -- uint64 max >> 2 + "inspect_inc_cycles" BIGINT NOT NULL CHECK ("inspect_inc_cycles" > 0) DEFAULT 4194304, -- 1 << 22 + "inspect_max_cycles" BIGINT NOT NULL CHECK ("inspect_max_cycles" > 0) DEFAULT 4611686018427387903, + "advance_inc_deadline" BIGINT NOT NULL CHECK ("advance_inc_deadline" > 0) DEFAULT 10000000000, -- 10s + "advance_max_deadline" BIGINT NOT NULL CHECK ("advance_max_deadline" > 0) DEFAULT 180000000000, -- 180s + "inspect_inc_deadline" BIGINT NOT NULL CHECK ("inspect_inc_deadline" > 0) DEFAULT 10000000000, --10s + "inspect_max_deadline" BIGINT NOT NULL CHECK ("inspect_max_deadline" > 0) DEFAULT 180000000000, -- 180s + "load_deadline" BIGINT NOT NULL CHECK ("load_deadline" > 0) DEFAULT 300000000000, -- 300s + "store_deadline" BIGINT NOT NULL CHECK ("store_deadline" > 0) DEFAULT 180000000000, -- 180s + "fast_deadline" BIGINT NOT NULL CHECK ("fast_deadline" > 0) DEFAULT 5000000000, -- 5s + "max_concurrent_inspects" INT NOT NULL CHECK ("max_concurrent_inspects" > 0) DEFAULT 10, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "execution_parameters_application_id_fkey" FOREIGN KEY ("application_id") REFERENCES "application"("id") ON DELETE CASCADE +); + +CREATE TRIGGER "execution_parameters_set_updated_at" BEFORE UPDATE ON "execution_parameters" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "epoch" +( + "application_id" int4 NOT NULL, + "index" uint64 NOT NULL, + "first_block" uint64 NOT NULL, + "last_block" uint64 NOT NULL, + "claim_hash" hash, + "claim_transaction_hash" hash, + "status" "EpochStatus" NOT NULL, + 
"virtual_index" uint64 NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "epoch_pkey" PRIMARY KEY ("application_id", "index"), + CONSTRAINT "epoch_application_id_virtual_index_unique" UNIQUE ("application_id", "virtual_index"), + CONSTRAINT "epoch_application_id_fkey" FOREIGN KEY ("application_id") REFERENCES "application"("id") ON DELETE CASCADE +); + +CREATE INDEX "epoch_last_block_idx" ON "epoch"("application_id", "last_block"); +CREATE INDEX "epoch_status_idx" ON "epoch"("application_id", "status"); + +CREATE TRIGGER "epoch_set_updated_at" BEFORE UPDATE ON "epoch" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "input" +( + "epoch_application_id" int4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "index" uint64 NOT NULL, + "block_number" uint64 NOT NULL, + "raw_data" BYTEA NOT NULL, + "status" "InputCompletionStatus" NOT NULL, + "machine_hash" hash, + "outputs_hash" hash, + "transaction_reference" hash, + "snapshot_uri" VARCHAR(4096), + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "input_pkey" PRIMARY KEY ("epoch_application_id", "index"), + CONSTRAINT "input_application_id_tx_reference_unique" UNIQUE ("epoch_application_id", "transaction_reference"), + CONSTRAINT "input_epoch_id_fkey" FOREIGN KEY ("epoch_application_id", "epoch_index") REFERENCES "epoch"("application_id", "index") ON DELETE CASCADE +); + +CREATE INDEX "input_block_number_idx" ON "input"("epoch_application_id", "block_number"); +CREATE INDEX "input_status_idx" ON "input"("epoch_application_id", "status"); + +CREATE INDEX "input_sender_idx" ON "input" ("epoch_application_id", substring("raw_data" FROM 81 FOR 20)); + +CREATE TRIGGER "input_set_updated_at" BEFORE UPDATE ON "input" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "output" +( + "input_epoch_application_id" int4 NOT NULL, + "input_index" 
uint64 NOT NULL, + "index" uint64 NOT NULL, + "raw_data" BYTEA NOT NULL, + "hash" hash, + "output_hashes_siblings" BYTEA[], + "execution_transaction_hash" hash, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "output_pkey" PRIMARY KEY ("input_epoch_application_id", "index"), + CONSTRAINT "output_input_id_fkey" FOREIGN KEY ("input_epoch_application_id", "input_index") REFERENCES "input"("epoch_application_id", "index") ON DELETE CASCADE, + CONSTRAINT "output_hashes_siblings_length_check" CHECK (check_hash_siblings("output_hashes_siblings")) +); + +CREATE INDEX "output_raw_data_type_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 1 FOR 4)); + +CREATE INDEX "output_raw_data_address_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 17 FOR 20) ) +WHERE SUBSTRING("raw_data" FROM 1 FOR 4) IN ( + E'\\x10321e8b', -- DelegateCallVoucher + E'\\x237a816f' -- Voucher +); + +CREATE TRIGGER "output_set_updated_at" BEFORE UPDATE ON "output" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "report" +( + "input_epoch_application_id" int4 NOT NULL, + "input_index" uint64 NOT NULL, + "index" uint64 NOT NULL, + "raw_data" BYTEA NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "report_pkey" PRIMARY KEY ("input_epoch_application_id", "index"), + CONSTRAINT "report_input_id_fkey" FOREIGN KEY ("input_epoch_application_id", "input_index") REFERENCES "input"("epoch_application_id", "index") ON DELETE CASCADE +); + +CREATE TRIGGER "report_set_updated_at" BEFORE UPDATE ON "report" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "node_config" +( + "key" VARCHAR(255) PRIMARY KEY, + "value" jsonb NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TRIGGER "config_set_updated_at" 
BEFORE UPDATE ON "node_config" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + + diff --git a/internal/repository/schema/schema.go b/internal/repository/postgres/schema/schema.go similarity index 74% rename from internal/repository/schema/schema.go rename to internal/repository/postgres/schema/schema.go index ccc6742ef..1de9f5edb 100644 --- a/internal/repository/schema/schema.go +++ b/internal/repository/postgres/schema/schema.go @@ -11,15 +11,18 @@ import ( "github.com/golang-migrate/migrate/v4" mig "github.com/golang-migrate/migrate/v4" + "github.com/golang-migrate/migrate/v4/database/pgx" _ "github.com/golang-migrate/migrate/v4/database/postgres" _ "github.com/golang-migrate/migrate/v4/source/file" "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/jackc/pgx/v5/stdlib" ) //go:embed migrations/* var content embed.FS -const ExpectedVersion uint = 2 +const ExpectedVersion uint = 1 type Schema struct { migrate *mig.Migrate @@ -39,6 +42,26 @@ func New(postgresEndpoint string) (*Schema, error) { return &Schema{migrate: migrate}, nil } +func NewWithPool(pool *pgxpool.Pool) (*Schema, error) { + source, err := iofs.New(content, "migrations") + if err != nil { + return nil, err + } + + db := stdlib.OpenDBFromPool(pool) + driver, err := pgx.WithInstance(db, &pgx.Config{}) + if err != nil { + return nil, fmt.Errorf("could not instantiate pgx migrate driver: %v", err) + } + + migrate, err := mig.NewWithInstance("iofs", source, "postgres", driver) + if err != nil { + return nil, err + } + + return &Schema{migrate: migrate}, nil +} + func (s *Schema) Version() (uint, error) { version, _, err := s.migrate.Version() if err != nil && errors.Is(err, migrate.ErrNilVersion) { diff --git a/internal/repository/postgres/util.go b/internal/repository/postgres/util.go new file mode 100644 index 000000000..418c55cf7 --- /dev/null +++ b/internal/repository/postgres/util.go @@ -0,0 +1,32 @@ +// (c) Cartesi and individual authors 
(see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "regexp" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +var hexAddressRegex = regexp.MustCompile(`^0x[0-9a-fA-F]{40}$`) + +func isHexAddress(s string) bool { + return hexAddressRegex.MatchString(s) +} + +func getWhereClauseFromNameOrAddress(nameOrAddress string) (postgres.BoolExpression, error) { + + var whereClause postgres.BoolExpression + if isHexAddress(nameOrAddress) { + address := common.HexToAddress(nameOrAddress) + whereClause = table.Application.IapplicationAddress.EQ(postgres.Bytea(address.Bytes())) + } else { + // treat as name + whereClause = table.Application.Name.EQ(postgres.String(nameOrAddress)) + } + return whereClause, nil +} diff --git a/internal/repository/repository.go b/internal/repository/repository.go new file mode 100644 index 000000000..86954f7c9 --- /dev/null +++ b/internal/repository/repository.go @@ -0,0 +1,178 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "context" + "encoding/json" + "fmt" + "time" + + . 
"github.com/cartesi/rollups-node/internal/model" + "github.com/ethereum/go-ethereum/common" +) + +type Pagination struct { + Limit int64 + Offset int64 +} + +type ApplicationFilter struct { + State *ApplicationState + Name *string + Address *string +} + +type EpochFilter struct { + Status *EpochStatus + BeforeBlock *uint64 +} + +type InputFilter struct { + InputIndex *uint64 + Status *InputCompletionStatus + NotStatus *InputCompletionStatus + TransactionReference *common.Hash +} + +type Range struct { + Start uint64 + End uint64 +} + +type OutputFilter struct { + InputIndex *uint64 + BlockRange *Range +} + +type ReportFilter struct { + InputIndex *uint64 +} + +type ApplicationRepository interface { + CreateApplication(ctx context.Context, app *Application) (int64, error) + GetApplication(ctx context.Context, nameOrAddress string) (*Application, error) + UpdateApplication(ctx context.Context, app *Application) error + UpdateApplicationState(ctx context.Context, app *Application) error + DeleteApplication(ctx context.Context, id int64) error + ListApplications(ctx context.Context, f ApplicationFilter, p Pagination) ([]*Application, error) + + GetExecutionParameters(ctx context.Context, applicationID int64) (*ExecutionParameters, error) + UpdateExecutionParameters(ctx context.Context, ep *ExecutionParameters) error +} + +type EpochRepository interface { + CreateEpoch(ctx context.Context, nameOrAddress string, e *Epoch) error + // FIXME move to BulkOperationsRepository + CreateEpochsAndInputs(ctx context.Context, nameOrAddress string, epochInputMap map[*Epoch][]*Input, blockNumber uint64) error + + GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) + GetEpochByVirtualIndex(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) + + UpdateEpoch(ctx context.Context, nameOrAddress string, e *Epoch) error + UpdateEpochsClaimAccepted(ctx context.Context, nameOrAddress string, epochs []*Epoch, mostRecentBlockNumber uint64) 
error + UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) error + + ListEpochs(ctx context.Context, nameOrAddress string, f EpochFilter, p Pagination) ([]*Epoch, error) +} + +type InputRepository interface { + GetInput(ctx context.Context, nameOrAddress string, inputIndex uint64) (*Input, error) + GetInputByTxReference(ctx context.Context, nameOrAddress string, ref *common.Hash) (*Input, error) + GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) // FIXME remove me (list, filter and order) + ListInputs(ctx context.Context, nameOrAddress string, f InputFilter, p Pagination) ([]*Input, error) +} + +type OutputRepository interface { + GetOutput(ctx context.Context, nameOrAddress string, outputIndex uint64) (*Output, error) + UpdateOutputsExecution(ctx context.Context, nameOrAddress string, executedOutputs []*Output, blockNumber uint64) error + ListOutputs(ctx context.Context, nameOrAddress string, f OutputFilter, p Pagination) ([]*Output, error) +} + +type ReportRepository interface { + GetReport(ctx context.Context, nameOrAddress string, reportIndex uint64) (*Report, error) + ListReports(ctx context.Context, nameOrAddress string, f ReportFilter, p Pagination) ([]*Report, error) +} + +type BulkOperationsRepository interface { + StoreAdvanceResult(ctx context.Context, appId int64, ar *AdvanceResult) error + StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error +} + +type NodeConfigRepository interface { + SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error + LoadNodeConfigRaw(ctx context.Context, key string) (rawJSON []byte, createdAt, updatedAt time.Time, err error) +} + +// FIXME: migrate ClaimRow -> Application + Epoch and use the other interfaces +type ClaimerRepository interface { + SelectOldestComputedClaimPerApp(ctx context.Context) ( + map[common.Address]*ClaimRow, + error, + ) + SelectNewestSubmittedOrAcceptedClaimPerApp(ctx context.Context) ( + 
map[common.Address]*ClaimRow, + error, + ) + SelectClaimPairsPerApp(ctx context.Context) ( + map[common.Address]*ClaimRow, + map[common.Address]*ClaimRow, + error, + ) + UpdateEpochWithSubmittedClaim( + ctx context.Context, + application_id int64, + index uint64, + transaction_hash common.Hash, + ) error +} + +type Repository interface { + ApplicationRepository + EpochRepository + InputRepository + OutputRepository + ReportRepository + BulkOperationsRepository + NodeConfigRepository + ClaimerRepository + // FIXME missing close +} + +func SaveNodeConfig[T any]( + ctx context.Context, + repo NodeConfigRepository, + nc *NodeConfig[T], +) error { + data, err := json.Marshal(nc.Value) + if err != nil { + return fmt.Errorf("marshal node_config value failed: %w", err) + } + err = repo.SaveNodeConfigRaw(ctx, nc.Key, data) + if err != nil { + return err + } + return nil +} + +func LoadNodeConfig[T any]( + ctx context.Context, + repo NodeConfigRepository, + key string, +) (*NodeConfig[T], error) { + raw, createdAt, updatedAt, err := repo.LoadNodeConfigRaw(ctx, key) + if err != nil { + return nil, err + } + var val T + if err := json.Unmarshal(raw, &val); err != nil { + return nil, fmt.Errorf("unmarshal node_config value failed: %w", err) + } + return &NodeConfig[T]{ + Key: key, + Value: val, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, nil +} diff --git a/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql b/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql deleted file mode 100644 index 52e644c8e..000000000 --- a/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql +++ /dev/null @@ -1,18 +0,0 @@ --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -DROP TABLE IF EXISTS "node_config"; -DROP TABLE IF EXISTS "snapshot"; -DROP TABLE IF EXISTS 
"report"; -DROP TABLE IF EXISTS "output"; -DROP TABLE IF EXISTS "input"; -DROP TABLE IF EXISTS "epoch"; -DROP TABLE IF EXISTS "execution_parameters"; -DROP TABLE IF EXISTS "application"; - -DROP FUNCTION IF EXISTS "f_maxuint64"; - -DROP TYPE IF EXISTS "InputCompletionStatus"; -DROP TYPE IF EXISTS "ApplicationStatus"; -DROP TYPE IF EXISTS "DefaultBlock"; -DROP TYPE IF EXISTS "EpochStatus"; diff --git a/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql b/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql deleted file mode 100644 index f23d071cc..000000000 --- a/internal/repository/schema/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql +++ /dev/null @@ -1,149 +0,0 @@ --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -CREATE TYPE "ApplicationStatus" AS ENUM ('RUNNING', 'NOT RUNNING'); - -CREATE TYPE "InputCompletionStatus" AS ENUM ( - 'NONE', - 'ACCEPTED', - 'REJECTED', - 'EXCEPTION', - 'MACHINE_HALTED', - 'CYCLE_LIMIT_EXCEEDED', - 'TIME_LIMIT_EXCEEDED', - 'PAYLOAD_LENGTH_LIMIT_EXCEEDED'); - -CREATE TYPE "DefaultBlock" AS ENUM ('FINALIZED', 'LATEST', 'PENDING', 'SAFE'); - -CREATE TYPE "EpochStatus" AS ENUM ( - 'OPEN', - 'CLOSED', - 'PROCESSED_ALL_INPUTS', - 'CLAIM_COMPUTED', - 'CLAIM_SUBMITTED', - 'CLAIM_ACCEPTED', - 'CLAIM_REJECTED'); - -CREATE FUNCTION "f_maxuint64"() - RETURNS NUMERIC(20,0) - LANGUAGE sql IMMUTABLE PARALLEL SAFE AS - 'SELECT 18446744073709551615'; - -CREATE TABLE "application" -( - "id" SERIAL, - "contract_address" BYTEA NOT NULL, - "template_hash" BYTEA NOT NULL, - "template_uri" VARCHAR(4096) NOT NULL, - "last_processed_block" NUMERIC(20,0) NOT NULL CHECK ("last_processed_block" >= 0 AND "last_processed_block" <= f_maxuint64()), - "status" "ApplicationStatus" NOT NULL, - "iconsensus_address" BYTEA NOT NULL, - "last_claim_check_block" 
NUMERIC(20,0) NOT NULL CHECK ("last_claim_check_block" >= 0 AND "last_claim_check_block" <= f_maxuint64()), - "last_output_check_block" NUMERIC(20,0) NOT NULL CHECK ("last_output_check_block" >= 0 AND "last_output_check_block" <= f_maxuint64()), - CONSTRAINT "application_pkey" PRIMARY KEY ("id"), - UNIQUE("contract_address") -); - - -CREATE TABLE "execution_parameters" ( - "application_id" INT PRIMARY KEY, - "advance_inc_cycles" BIGINT NOT NULL CHECK ("advance_inc_cycles" > 0) DEFAULT 4194304, -- 1 << 22 - "advance_max_cycles" BIGINT NOT NULL CHECK ("advance_max_cycles" > 0) DEFAULT 4611686018427387903, -- uint64 max >> 2 - "inspect_inc_cycles" BIGINT NOT NULL CHECK ("inspect_inc_cycles" > 0) DEFAULT 4194304, -- 1 << 22 - "inspect_max_cycles" BIGINT NOT NULL CHECK ("inspect_max_cycles" > 0) DEFAULT 4611686018427387903, - "advance_inc_deadline" BIGINT NOT NULL CHECK ("advance_inc_deadline" > 0) DEFAULT 10000000000, -- 10s - "advance_max_deadline" BIGINT NOT NULL CHECK ("advance_max_deadline" > 0) DEFAULT 180000000000, -- 180s - "inspect_inc_deadline" BIGINT NOT NULL CHECK ("inspect_inc_deadline" > 0) DEFAULT 10000000000, --10s - "inspect_max_deadline" BIGINT NOT NULL CHECK ("inspect_max_deadline" > 0) DEFAULT 180000000000, -- 180s - "load_deadline" BIGINT NOT NULL CHECK ("load_deadline" > 0) DEFAULT 300000000000, -- 300s - "store_deadline" BIGINT NOT NULL CHECK ("store_deadline" > 0) DEFAULT 180000000000, -- 180s - "fast_deadline" BIGINT NOT NULL CHECK ("fast_deadline" > 0) DEFAULT 5000000000, -- 5s - "max_concurrent_inspects" INT NOT NULL CHECK ("max_concurrent_inspects" > 0) DEFAULT 10, - CONSTRAINT "application_id_fkey" FOREIGN KEY ("application_id") REFERENCES "application"("id") -); - -CREATE TABLE "epoch" -( - "id" BIGSERIAL, - "application_address" BYTEA NOT NULL, - "index" BIGINT NOT NULL, - "first_block" NUMERIC(20,0) NOT NULL CHECK ("first_block" >= 0 AND "first_block" <= f_maxuint64()), - "last_block" NUMERIC(20,0) NOT NULL CHECK ("last_block" >= 0 AND 
"last_block" <= f_maxuint64()), - "claim_hash" BYTEA, - "transaction_hash" BYTEA, - "status" "EpochStatus" NOT NULL, - CONSTRAINT "epoch_pkey" PRIMARY KEY ("id"), - CONSTRAINT "epoch_application_address_fkey" FOREIGN KEY ("application_address") REFERENCES "application"("contract_address"), - UNIQUE ("index","application_address") -); - -CREATE INDEX "epoch_idx" ON "epoch"("index"); -CREATE INDEX "epoch_last_block_idx" ON "epoch"("last_block"); - -CREATE TABLE "input" -( - "id" BIGSERIAL, - "index" NUMERIC(20,0) NOT NULL CHECK ("index" >= 0 AND "index" <= f_maxuint64()), - "raw_data" BYTEA NOT NULL, - "block_number" NUMERIC(20,0) NOT NULL CHECK ("block_number" >= 0 AND "block_number" <= f_maxuint64()), - "status" "InputCompletionStatus" NOT NULL, - "machine_hash" BYTEA, - "outputs_hash" BYTEA, - "application_address" BYTEA NOT NULL, - "epoch_id" BIGINT NOT NULL, - CONSTRAINT "input_pkey" PRIMARY KEY ("id"), - CONSTRAINT "input_application_address_fkey" FOREIGN KEY ("application_address") REFERENCES "application"("contract_address"), - CONSTRAINT "input_epoch_fkey" FOREIGN KEY ("epoch_id") REFERENCES "epoch"("id"), - UNIQUE("index", "application_address") -); - -CREATE INDEX "input_idx" ON "input"("block_number"); - -CREATE TABLE "output" -( - "id" BIGSERIAL, - "index" NUMERIC(20,0) NOT NULL CHECK ("index" >= 0 AND "index" <= f_maxuint64()), - "raw_data" BYTEA NOT NULL, - "hash" BYTEA, - "output_hashes_siblings" BYTEA[], - "input_id" BIGINT NOT NULL, - "transaction_hash" BYTEA, - CONSTRAINT "output_pkey" PRIMARY KEY ("id"), - CONSTRAINT "output_input_id_fkey" FOREIGN KEY ("input_id") REFERENCES "input"("id") -); - -CREATE INDEX "output_idx" ON "output"("index"); - -CREATE TABLE "report" -( - "id" BIGSERIAL, - "index" NUMERIC(20,0) NOT NULL CHECK ("index" >= 0 AND "index" <= f_maxuint64()), - "raw_data" BYTEA NOT NULL, - "input_id" BIGINT NOT NULL, - CONSTRAINT "report_pkey" PRIMARY KEY ("id"), - CONSTRAINT "report_input_id_fkey" FOREIGN KEY ("input_id") REFERENCES 
"input"("id") -); - -CREATE INDEX "report_idx" ON "report"("index"); - -CREATE TABLE "snapshot" -( - "id" BIGSERIAL, - "input_id" BIGINT NOT NULL, - "application_address" BYTEA NOT NULL, - "uri" VARCHAR(4096) NOT NULL, - CONSTRAINT "snapshot_pkey" PRIMARY KEY ("id"), - CONSTRAINT "snapshot_input_id_fkey" FOREIGN KEY ("input_id") REFERENCES "input"("id"), - CONSTRAINT "snapshot_application_address_fkey" FOREIGN KEY ("application_address") REFERENCES "application"("contract_address"), - UNIQUE("input_id") -); - -CREATE TABLE "node_config" -( - "default_block" "DefaultBlock" NOT NULL, - "input_box_deployment_block" INT NOT NULL, - "input_box_address" BYTEA NOT NULL, - "chain_id" INT NOT NULL -); - - diff --git a/internal/repository/schema/migrations/000002_create_postgraphile_view.down.sql b/internal/repository/schema/migrations/000002_create_postgraphile_view.down.sql deleted file mode 100644 index 4fc005d2a..000000000 --- a/internal/repository/schema/migrations/000002_create_postgraphile_view.down.sql +++ /dev/null @@ -1,4 +0,0 @@ --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -DROP SCHEMA graphql CASCADE; diff --git a/internal/repository/schema/migrations/000002_create_postgraphile_view.up.sql b/internal/repository/schema/migrations/000002_create_postgraphile_view.up.sql deleted file mode 100644 index 0ea36662d..000000000 --- a/internal/repository/schema/migrations/000002_create_postgraphile_view.up.sql +++ /dev/null @@ -1,77 +0,0 @@ - --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -CREATE SCHEMA IF NOT EXISTS graphql; - -CREATE OR REPLACE VIEW graphql."applications" AS - SELECT - "contract_address", - "template_hash", - "last_processed_block", - "status" - FROM - "application"; - -CREATE OR REPLACE VIEW graphql."epochs" AS - SELECT - "index", - "application_address", - "first_block", - "last_block", - "transaction_hash", - "claim_hash", - 
"status" - FROM - "epoch"; - -CREATE OR REPLACE VIEW graphql."inputs" AS - SELECT - i."index", - i."status", - i."block_number", - i."raw_data", - i."machine_hash", - i."outputs_hash", - i."application_address", - e."index" as "epoch_index" - FROM - "input" i - INNER JOIN - "epoch" e on i."epoch_id" = e."id"; - -CREATE OR REPLACE VIEW graphql."outputs" AS - SELECT - o."index", - o."raw_data", - o."output_hashes_siblings", - o."transaction_hash", - i."index" as "input_index" - FROM - "output" o - INNER JOIN - "input" i on o."input_id"=i."id"; - -CREATE OR REPLACE VIEW graphql."reports" AS - SELECT - r."index", - r."raw_data", - i."index" as "input_index" - FROM - "report" r - INNER JOIN - "input" i on r."input_id"=i."id"; - - -COMMENT ON VIEW graphql."inputs" is - E'@foreignKey (application_address) references applications(contract_address)|@fieldName applicationByApplicationAddress\n@foreignKey (epoch_index) references epochs(index)|@fieldName epochByEpochIndex'; - -COMMENT ON VIEW graphql."outputs" is - E'@foreignKey (input_index) references inputs(index)|@fieldName inputByInputIndex'; - -COMMENT ON VIEW graphql."reports" is - E'@foreignKey (input_index) references inputs(index)|@fieldName inputByInputIndex'; - -COMMENT ON VIEW graphql."epochs" is - E'@foreignKey (application_address) references applications(contract_address)|@fieldName applicationByApplicationAddress'; - \ No newline at end of file diff --git a/internal/repository/validator.go b/internal/repository/validator.go deleted file mode 100644 index b51ea2782..000000000 --- a/internal/repository/validator.go +++ /dev/null @@ -1,348 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -package repository - -import ( - "context" - "errors" - "fmt" - - . 
"github.com/cartesi/rollups-node/internal/model" - "github.com/jackc/pgx/v5" -) - -// GetOutputsProducedInBlockRange returns outputs produced by inputs sent to the application -// between start and end blocks, inclusive. Outputs are returned in ascending -// order by index. -func (pg *Database) GetOutputsProducedInBlockRange( - ctx context.Context, - application Address, - startBlock uint64, - endBlock uint64, -) ([]Output, error) { - query := ` - SELECT - o.id, - o.index, - o.raw_data, - o.hash, - o.output_hashes_siblings, - o.input_id - FROM - output o - INNER JOIN - input i - ON - o.input_id=i.id - WHERE - i.block_number BETWEEN @startBlock AND @endBlock - AND - i.application_address=@appAddress - AND - i.status='ACCEPTED' - ORDER BY - o.index ASC - ` - - args := pgx.NamedArgs{"startBlock": startBlock, "endBlock": endBlock, "appAddress": application} - rows, err := pg.db.Query(ctx, query, args) - if err != nil { - return nil, fmt.Errorf("GetOutputs failed: %w", err) - } - - var ( - id, index, inputId uint64 - rawData []byte - hash *Hash - outputHashesSiblings []Hash - outputs []Output - ) - scans := []any{&id, &index, &rawData, &hash, &outputHashesSiblings, &inputId} - _, err = pgx.ForEachRow(rows, scans, func() error { - output := Output{ - Id: id, - Index: index, - RawData: rawData, - Hash: hash, - OutputHashesSiblings: outputHashesSiblings, - InputId: inputId, - } - outputs = append(outputs, output) - return nil - }) - if err != nil { - return nil, fmt.Errorf("GetOutputs failed: %w", err) - } - return outputs, nil -} - -// GetProcessedEpochs returns epochs from the application which had all -// its inputs processed. Epochs are returned in ascending order by index. 
-func (pg *Database) GetProcessedEpochs(ctx context.Context, application Address) ([]Epoch, error) { - query := ` - SELECT - id, - application_address, - index, - first_block, - last_block, - claim_hash, - transaction_hash, - status - FROM - epoch - WHERE - application_address=@appAddress AND status=@status - ORDER BY - index ASC` - - args := pgx.NamedArgs{ - "appAddress": application, - "status": EpochStatusProcessedAllInputs, - } - - rows, err := pg.db.Query(ctx, query, args) - if err != nil { - return nil, fmt.Errorf("GetProcessedEpochs failed: %w", err) - } - - var ( - id, index, firstBlock, lastBlock uint64 - appAddress Address - claimHash, transactionHash *Hash - status string - results []Epoch - ) - - scans := []any{ - &id, &appAddress, &index, &firstBlock, &lastBlock, &claimHash, &transactionHash, &status, - } - _, err = pgx.ForEachRow(rows, scans, func() error { - epoch := Epoch{ - Id: id, - Index: index, - AppAddress: appAddress, - FirstBlock: firstBlock, - LastBlock: lastBlock, - ClaimHash: claimHash, - TransactionHash: transactionHash, - Status: EpochStatus(status), - } - results = append(results, epoch) - return nil - }) - if err != nil { - return nil, fmt.Errorf("GetProcessedEpochs failed: %w", err) - } - return results, nil -} - -// GetLastInputOutputsHash returns the outputs Merkle tree hash calculated -// by the Cartesi Machine after it processed the last input in the provided -// epoch. 
-func (pg *Database) GetLastInputOutputsHash( - ctx context.Context, - epochIndex uint64, - appAddress Address, -) (*Hash, error) { - //Get Epoch from Database - epoch, err := pg.GetEpoch(ctx, epochIndex, appAddress) - if err != nil { - return nil, err - } - - //Check Epoch Status - switch epoch.Status { //nolint:exhaustive - case EpochStatusOpen, EpochStatusClosed: - err := fmt.Errorf( - "epoch '%d' of app '%v' is still being processed", - epoch.Index, epoch.AppAddress, - ) - return nil, err - default: - break - } - - //Get epoch last input - query := ` - SELECT - outputs_hash - FROM - input - WHERE - epoch_id = @id - ORDER BY - index DESC - LIMIT 1 - ` - - args := pgx.NamedArgs{"id": epoch.Id} - var outputHash Hash - - err = pg.db.QueryRow(ctx, query, args).Scan(&outputHash) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - pg.Logger.Warn( - "no inputs", - "service", "repository", - "epoch", epoch.Index, - "app", epoch.AppAddress, - ) - return nil, nil - } - return nil, fmt.Errorf("GetLastInputOutputsHash failed: %w", err) - } - return &outputHash, nil -} - -// GetPreviousEpoch returns the epoch that ended one block before the start -// of the current epoch -func (pg *Database) GetPreviousEpoch(ctx context.Context, currentEpoch Epoch) (*Epoch, error) { - query := ` - SELECT - id, - application_address, - index, - first_block, - last_block, - claim_hash, - transaction_hash, - status - FROM - epoch - WHERE - application_address=@appAddress AND index < @index - ORDER BY - index DESC - LIMIT 1 - ` - - args := pgx.NamedArgs{ - "appAddress": currentEpoch.AppAddress, - "index": currentEpoch.Index, - } - - var ( - id, index, firstBlock, lastBlock uint64 - appAddress Address - claimHash, transactionHash *Hash - status EpochStatus - ) - - err := pg.db.QueryRow(ctx, query, args).Scan( - &id, - &appAddress, - &index, - &firstBlock, - &lastBlock, - &claimHash, - &transactionHash, - &status, - ) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, nil - 
} - return nil, fmt.Errorf("GetPreviousEpoch failed: %w", err) - } - - return &Epoch{ - Id: id, - Index: index, - FirstBlock: firstBlock, - LastBlock: lastBlock, - TransactionHash: transactionHash, - ClaimHash: claimHash, - Status: status, - AppAddress: appAddress, - }, nil -} - -// SetEpochClaimAndInsertProofsTransaction performs a database transaction -// containing two operations: -// -// 1. Updates an epoch, adding its claim and modifying its status. -// -// 2. Updates several outputs with their Keccak256 hash and proof. -func (pg *Database) SetEpochClaimAndInsertProofsTransaction( - ctx context.Context, - epoch Epoch, - outputs []Output, -) error { - query1 := ` - UPDATE epoch - SET - claim_hash=@claimHash, - status=@status - WHERE - id = @id - ` - - args := pgx.NamedArgs{ - "claimHash": epoch.ClaimHash, - "status": EpochStatusClaimComputed, - "id": epoch.Id, - } - - tx, err := pg.db.Begin(ctx) - if err != nil { - return fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err) - } - tag, err := tx.Exec(ctx, query1, args) - if err != nil { - return errors.Join( - fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err), - tx.Rollback(ctx), - ) - } - if tag.RowsAffected() != 1 { - return errors.Join( - fmt.Errorf("failed to update epoch %d: no rows affected", epoch.Index), - tx.Rollback(ctx), - ) - } - - query2 := ` - UPDATE - output - SET - hash = @hash, - output_hashes_siblings = @outputHashesSiblings - WHERE - id = @id - ` - - for _, output := range outputs { - outputArgs := pgx.NamedArgs{ - "hash": output.Hash, - "outputHashesSiblings": output.OutputHashesSiblings, - "id": output.Id, - } - tag, err := tx.Exec(ctx, query2, outputArgs) - if err != nil { - return errors.Join( - fmt.Errorf("failed to insert proof for output '%d'. %w", output.Index, err), - tx.Rollback(ctx), - ) - } - if tag.RowsAffected() == 0 { - return errors.Join( - fmt.Errorf( - "failed to insert proof for output '%d'. 
No rows affected", - output.Index, - ), - tx.Rollback(ctx), - ) - } - } - - err = tx.Commit(ctx) - if err != nil { - return errors.Join( - fmt.Errorf("SetEpochClaimAndInsertProofsTransaction failed: %w", err), - tx.Rollback(ctx), - ) - } - return nil -} diff --git a/internal/repository/validator_test.go b/internal/repository/validator_test.go deleted file mode 100644 index 185db39d3..000000000 --- a/internal/repository/validator_test.go +++ /dev/null @@ -1,374 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -package repository - -import ( - . "github.com/cartesi/rollups-node/internal/model" - "github.com/ethereum/go-ethereum/common" -) - -func (s *RepositorySuite) TestGetOutputsProducedInBlockRange() { - // get outputs from the existing app - outputs, err := s.database.GetOutputsProducedInBlockRange( - s.ctx, - common.HexToAddress("deadbeef"), - 1, 3, - ) - s.Require().Nil(err) - s.Require().Len(outputs, 2) - - // add an output from another app - app := Application{ - ContractAddress: common.HexToAddress("deadbeee"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err = s.database.InsertApplication(s.ctx, &app) - s.Require().Nil(err) - - epoch := Epoch{ - Index: 0, - AppAddress: app.ContractAddress, - FirstBlock: 0, - LastBlock: 99, - Status: EpochStatusProcessedAllInputs, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - s.Require().Nil(err) - - expectedHash := common.BytesToHash([]byte("outputs hash")) - input := Input{ - Index: 0, - CompletionStatus: InputStatusAccepted, - BlockNumber: 1, - OutputsHash: &expectedHash, - RawData: []byte("data"), - AppAddress: app.ContractAddress, - EpochId: epoch.Id, - } - input.Id, err = s.database.InsertInput(s.ctx, &input) - s.Require().Nil(err) - - newOutput := &Output{ - Index: 0, - RawData: []byte("data"), - InputId: input.Id, - } - newOutput.Id, err = s.database.InsertOutput(s.ctx, 
newOutput) - s.Require().Nil(err) - - // the output from the other application is not considered - outputs, err = s.database.GetOutputsProducedInBlockRange( - s.ctx, - common.HexToAddress("deadbeef"), - 1, 3, - ) - s.Require().Nil(err) - s.Require().Len(outputs, 2) - for _, output := range outputs { - s.NotEqual(newOutput.Id, output.Id) - } -} - -func (s *RepositorySuite) TestGetProcessedEpochs() { - app := Application{ - ContractAddress: common.HexToAddress("deadbeed"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err := s.database.InsertApplication(s.ctx, &app) - s.Require().Nil(err) - - // no epochs, should return nothing - epochs, err := s.database.GetProcessedEpochs(s.ctx, app.ContractAddress) - s.Require().Nil(err) - s.Len(epochs, 0) - - epoch := Epoch{ - AppAddress: app.ContractAddress, - Index: 0, - FirstBlock: 0, - LastBlock: 99, - Status: EpochStatusOpen, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - s.Require().Nil(err) - - // a single non-processed epoch, should return nothing - epochs, err = s.database.GetProcessedEpochs(s.ctx, app.ContractAddress) - s.Require().Nil(err) - s.Len(epochs, 0) - - epoch2 := Epoch{ - AppAddress: app.ContractAddress, - Index: 1, - FirstBlock: 100, - LastBlock: 199, - Status: EpochStatusProcessedAllInputs, - } - epoch2.Id, err = s.database.InsertEpoch(s.ctx, &epoch2) - s.Require().Nil(err) - - // should return the processed epoch - epochs, err = s.database.GetProcessedEpochs(s.ctx, app.ContractAddress) - s.Require().Nil(err) - s.Len(epochs, 1) - s.Contains(epochs, epoch2) -} - -func (s *RepositorySuite) TestGetLastInputOutputHash() { - app := Application{ - ContractAddress: common.HexToAddress("deadbeec"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err := s.database.InsertApplication(s.ctx, &app) - s.Require().Nil(err) - - epoch := Epoch{ - Index: 0, - AppAddress: app.ContractAddress, - FirstBlock: 
0, - LastBlock: 99, - Status: EpochStatusOpen, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - s.Require().Nil(err) - - // should fail - hash, err := s.database.GetLastInputOutputsHash(s.ctx, epoch.Index, epoch.AppAddress) - s.Require().NotNil(err) - s.Nil(hash) - s.ErrorContains(err, "still being processed") - - epoch2 := Epoch{ - Index: 1, - AppAddress: app.ContractAddress, - FirstBlock: 100, - LastBlock: 199, - Status: EpochStatusClosed, - } - epoch2.Id, err = s.database.InsertEpoch(s.ctx, &epoch2) - s.Require().Nil(err) - - // should fail - hash, err = s.database.GetLastInputOutputsHash(s.ctx, epoch2.Index, epoch2.AppAddress) - s.Require().NotNil(err) - s.Nil(hash) - s.ErrorContains(err, "still being processed") - - epoch3 := Epoch{ - Index: 2, - AppAddress: app.ContractAddress, - FirstBlock: 200, - LastBlock: 299, - Status: EpochStatusProcessedAllInputs, - } - epoch3.Id, err = s.database.InsertEpoch(s.ctx, &epoch3) - s.Require().Nil(err) - - expectedHash := common.BytesToHash([]byte("outputs hash")) - input := &Input{ - Index: 0, - CompletionStatus: InputStatusAccepted, - BlockNumber: 1, - OutputsHash: &expectedHash, - RawData: []byte("data"), - AppAddress: app.ContractAddress, - EpochId: epoch3.Id, - } - input.Id, err = s.database.InsertInput(s.ctx, input) - s.Require().Nil(err) - - hash, err = s.database.GetLastInputOutputsHash(s.ctx, epoch3.Index, epoch3.AppAddress) - s.Require().Nil(err) - s.Require().NotNil(hash) - s.Equal(expectedHash, *hash) -} - -func (s *RepositorySuite) TestGetPreviousEpoch() { - app := &Application{ - ContractAddress: common.HexToAddress("deadbeeb"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err := s.database.InsertApplication(s.ctx, app) - s.Require().Nil(err) - - epoch := Epoch{ - Index: 0, - AppAddress: app.ContractAddress, - FirstBlock: 0, - LastBlock: 99, - Status: EpochStatusClaimAccepted, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - 
s.Require().Nil(err) - - // first epoch, should return nil - previousEpoch, err := s.database.GetPreviousEpoch(s.ctx, epoch) - s.Require().Nil(previousEpoch) - s.Require().Nil(err) - - epoch2 := Epoch{ - Index: 1, - AppAddress: app.ContractAddress, - FirstBlock: 100, - LastBlock: 199, - Status: EpochStatusClaimAccepted, - } - epoch2.Id, err = s.database.InsertEpoch(s.ctx, &epoch2) - s.Require().Nil(err) - - // second epoch, should return first - previousEpoch, err = s.database.GetPreviousEpoch(s.ctx, epoch2) - s.Require().Nil(err) - s.Require().NotNil(previousEpoch) - s.Require().Equal(previousEpoch.Id, epoch.Id) -} - -func (s *RepositorySuite) TestSetEpochClaimAndInsertProofsTransaction() { - app := Application{ - ContractAddress: common.HexToAddress("deadbeea"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err := s.database.InsertApplication(s.ctx, &app) - s.Require().Nil(err) - - epoch := Epoch{ - Index: 0, - AppAddress: app.ContractAddress, - FirstBlock: 0, - LastBlock: 99, - Status: EpochStatusProcessedAllInputs, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - s.Require().Nil(err) - - input := Input{ - Index: 0, - CompletionStatus: InputStatusAccepted, - BlockNumber: 1, - RawData: []byte("data"), - AppAddress: app.ContractAddress, - EpochId: epoch.Id, - } - input.Id, err = s.database.InsertInput(s.ctx, &input) - s.Require().Nil(err) - - output1 := Output{ - Index: 100, - RawData: []byte("data"), - InputId: input.Id, - } - output1.Id, err = s.database.InsertOutput(s.ctx, &output1) - s.Require().Nil(err) - - output2 := Output{ - Index: 101, - RawData: []byte("data"), - InputId: input.Id, - } - output2.Id, err = s.database.InsertOutput(s.ctx, &output2) - s.Require().Nil(err) - - expectedClaim := common.BytesToHash([]byte("claim")) - epoch.ClaimHash = &expectedClaim - epoch.Status = EpochStatusClaimComputed - expectedSiblings1 := []Hash{{}, {}} - expectedHash1 := 
common.BytesToHash([]byte("output")) - output1.Hash = &expectedHash1 - output1.OutputHashesSiblings = expectedSiblings1 - expectedHash2 := common.BytesToHash([]byte("output")) - expectedSiblings2 := []Hash{{}, {}, {}} - output2.Hash = &expectedHash2 - output2.OutputHashesSiblings = expectedSiblings2 - - err = s.database.SetEpochClaimAndInsertProofsTransaction( - s.ctx, - epoch, - []Output{output1, output2}, - ) - s.Require().Nil(err) - - updatedClaim, err := s.database.GetEpoch(s.ctx, 0, epoch.AppAddress) - s.Require().Nil(err) - s.Require().NotNil(updatedClaim) - s.Require().NotNil(updatedClaim.ClaimHash) - s.Equal(expectedClaim, *updatedClaim.ClaimHash) - s.Equal(EpochStatusClaimComputed, updatedClaim.Status) - - updatedOutput1, err := s.database.GetOutput(s.ctx, input.AppAddress, 100) - s.Require().Nil(err) - s.Require().NotNil(updatedOutput1) - s.Require().NotNil(updatedOutput1.Hash) - s.Equal(expectedHash1, *updatedOutput1.Hash) - s.Equal(expectedSiblings1, updatedOutput1.OutputHashesSiblings) - - updatedOutput2, err := s.database.GetOutput(s.ctx, input.AppAddress, 101) - s.Require().Nil(err) - s.Require().NotNil(updatedOutput2) - s.Require().NotNil(updatedOutput2.Hash) - s.Equal(expectedHash2, *updatedOutput2.Hash) - s.Equal(expectedSiblings2, updatedOutput2.OutputHashesSiblings) -} - -func (s *RepositorySuite) TestSetEpochClaimAndInsertProofsTransactionRollback() { - app := Application{ - ContractAddress: common.HexToAddress("deadbeff"), - TemplateHash: common.BytesToHash([]byte("template")), - Status: ApplicationStatusRunning, - } - _, err := s.database.InsertApplication(s.ctx, &app) - s.Require().Nil(err) - - epoch := Epoch{ - Index: 0, - AppAddress: app.ContractAddress, - FirstBlock: 0, - LastBlock: 99, - Status: EpochStatusProcessedAllInputs, - } - epoch.Id, err = s.database.InsertEpoch(s.ctx, &epoch) - s.Require().Nil(err) - - input := Input{ - Index: 0, - CompletionStatus: InputStatusAccepted, - BlockNumber: 1, - RawData: []byte("data"), - AppAddress: 
app.ContractAddress, - EpochId: epoch.Id, - } - input.Id, err = s.database.InsertInput(s.ctx, &input) - s.Require().Nil(err) - - output1 := Output{ - Index: 102, - RawData: []byte("data"), - InputId: input.Id, - } - output1.Id, err = s.database.InsertOutput(s.ctx, &output1) - s.Require().Nil(err) - - output1.Id = 978233982398 // non-existing id - claim := common.BytesToHash([]byte("claim")) - epoch.ClaimHash = &claim - epoch.Status = EpochStatusClaimComputed - - err = s.database.SetEpochClaimAndInsertProofsTransaction( - s.ctx, - epoch, - []Output{output1}, - ) - s.Require().NotNil(err) - s.ErrorContains(err, "No rows affected") - - nonUpdatedEpoch, err := s.database.GetEpoch(s.ctx, epoch.Index, epoch.AppAddress) - s.Require().Nil(err) - s.Require().NotNil(nonUpdatedEpoch) - s.Nil(nonUpdatedEpoch.ClaimHash) - s.Equal(EpochStatusProcessedAllInputs, nonUpdatedEpoch.Status) -} diff --git a/internal/validator/validator.go b/internal/validator/validator.go index b83898b2d..9075baebd 100644 --- a/internal/validator/validator.go +++ b/internal/validator/validator.go @@ -14,7 +14,9 @@ import ( "github.com/cartesi/rollups-node/internal/merkle" . 
"github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -49,7 +51,7 @@ func Create(c *CreateInfo, s *Service) error { return service.WithTimeout(c.MaxStartupTime, func() error { if c.Repository == nil { - c.Repository, err = repository.Connect(s.Context, c.PostgresEndpoint.Value, s.Logger) + c.Repository, err = factory.NewRepositoryFromConnectionString(s.Context, c.PostgresEndpoint.Value) if err != nil { return err } @@ -85,41 +87,49 @@ func (v *Service) String() string { const MAX_OUTPUT_TREE_HEIGHT = 63 type ValidatorRepository interface { - // GetAllRunningApplications returns a slice with the applications currently - // being validated by the node. - GetAllRunningApplications(ctx context.Context) ([]Application, error) + ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination) ([]*Application, error) + // GetOutputsProducedInBlockRange returns outputs produced by inputs sent to // the application in the provided block range, inclusive. Outputs are // returned in ascending order by index. - GetOutputsProducedInBlockRange( - ctx context.Context, - application Address, - firstBlock, lastBlock uint64, - ) ([]Output, error) + // GetOutputsProducedInBlockRange( + // ctx context.Context, + // application common.Address, + // firstBlock, lastBlock uint64, + // ) ([]Output, error) + ListOutputs(ctx context.Context, nameOrAddress string, f repository.OutputFilter, p repository.Pagination) ([]*Output, error) + // GetProcessedEpochs returns epochs from the application which had all // its inputs processed. Epochs are returned in ascending order by index. 
- GetProcessedEpochs(ctx context.Context, application Address) ([]Epoch, error) + // GetProcessedEpochs(ctx context.Context, application common.Address) ([]Epoch, error) + ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, p repository.Pagination) ([]*Epoch, error) + // GetLastInputOutputsHash returns the outputs Merkle tree hash calculated // by the Cartesi Machine after it processed the last input in the epoch. - GetLastInputOutputsHash( - ctx context.Context, - epochIndex uint64, - appAddress Address, - ) (*Hash, error) + GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) // FIXME migrate to list + //ListInputs(ctx context.Context, nameOrAddress string, f repository.InputFilter, p repository.Pagination) ([]*Input, error) + // GetPreviousEpoch returns the epoch that ended one block before the start // of the current epoch - GetPreviousEpoch(ctx context.Context, currentEpoch Epoch) (*Epoch, error) + GetEpochByVirtualIndex(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) + // ValidateEpochTransaction performs a database transaction // containing two operations: // // 1. Updates an epoch, adding its claim and modifying its status. // // 2. Updates outputs with their Keccak256 hash and proof. 
- SetEpochClaimAndInsertProofsTransaction( - ctx context.Context, - epoch Epoch, - outputs []Output, - ) error + // SetEpochClaimAndInsertProofsTransaction( + // ctx context.Context, + // epoch Epoch, + // outputs []Output, + // ) error + StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error +} + +func getAllRunningApplications(ctx context.Context, er ValidatorRepository) ([]*Application, error) { + f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled)} + return er.ListApplications(ctx, f, repository.Pagination{}) } // Run executes the Validator main logic of producing claims and/or proofs @@ -127,7 +137,7 @@ type ValidatorRepository interface { // inside a loop. If an error occurs while processing any epoch, it halts and // returns the error. func (v *Service) Run(ctx context.Context) error { - apps, err := v.repository.GetAllRunningApplications(ctx) + apps, err := getAllRunningApplications(ctx, v.repository) if err != nil { return fmt.Errorf("failed to get running applications. %w", err) } @@ -140,27 +150,35 @@ func (v *Service) Run(ctx context.Context) error { return nil } +func getProcessedEpochs(ctx context.Context, er ValidatorRepository, address string) ([]*Epoch, error) { + f := repository.EpochFilter{Status: Pointer(EpochStatus_InputsProcessed)} + return er.ListEpochs(ctx, address, f, repository.Pagination{}) +} + // validateApplication calculates, validates and stores the claim and/or proofs // for each processed epoch of the application. 
-func (v *Service) validateApplication(ctx context.Context, app Application) error { - v.Logger.Debug("starting validation", "application", app.ContractAddress) - processedEpochs, err := v.repository.GetProcessedEpochs(ctx, app.ContractAddress) +func (v *Service) validateApplication(ctx context.Context, app *Application) error { + v.Logger.Debug("Starting validation", "application", app.Name) + appAddress := app.IApplicationAddress.String() + processedEpochs, err := getProcessedEpochs(ctx, v.repository, appAddress) if err != nil { return fmt.Errorf( "failed to get processed epochs of application %v. %w", - app.ContractAddress, err, + app.IApplicationAddress, err, ) } for _, epoch := range processedEpochs { - v.Logger.Debug("started calculating claim", - "app", app.ContractAddress, + v.Logger.Debug("Started calculating claim", + "app", app.IApplicationAddress, "epoch_index", epoch.Index, + "last_block", epoch.LastBlock, ) - claim, outputs, err := v.createClaimAndProofs(ctx, epoch) - v.Logger.Info("claim calculated", - "app", app.ContractAddress, + claim, outputs, err := v.createClaimAndProofs(ctx, appAddress, epoch) + v.Logger.Info("Claim Computed", + "app", app.IApplicationAddress, "epoch_index", epoch.Index, + "last_block", epoch.LastBlock, ) if err != nil { return err @@ -171,83 +189,100 @@ func (v *Service) validateApplication(ctx context.Context, app Application) erro // last input in the epoch must match the claim hash calculated by the // Validator. We first retrieve the hash calculated by the // Cartesi Machine... - machineClaim, err := v.repository.GetLastInputOutputsHash( + input, err := v.repository.GetLastInput( ctx, + appAddress, epoch.Index, - epoch.AppAddress, ) if err != nil { return fmt.Errorf( "failed to get the machine claim for epoch %v of application %v. 
%w", - epoch.Index, epoch.AppAddress, err, + epoch.Index, app.IApplicationAddress, err, ) } - if machineClaim == nil { + if input.OutputsHash == nil { return fmt.Errorf( "inconsistent state: machine claim for epoch %v of application %v was not found", - epoch.Index, epoch.AppAddress, + epoch.Index, app.IApplicationAddress, ) } // ...and compare it to the hash calculated by the Validator - if *machineClaim != *claim { + if *input.OutputsHash != *claim { return fmt.Errorf( "validator claim does not match machine claim for epoch %v of application %v", - epoch.Index, epoch.AppAddress, + epoch.Index, app.IApplicationAddress, ) } // update the epoch status and its claim - epoch.Status = EpochStatusClaimComputed + epoch.Status = EpochStatus_ClaimComputed epoch.ClaimHash = claim // store the epoch and proofs in the database - err = v.repository.SetEpochClaimAndInsertProofsTransaction(ctx, epoch, outputs) + err = v.repository.StoreClaimAndProofs(ctx, epoch, outputs) if err != nil { return fmt.Errorf( "failed to store claim and proofs for epoch %v of application %v. %w", - epoch.Index, epoch.AppAddress, err, + epoch.Index, app.IApplicationAddress, err, ) } } if len(processedEpochs) == 0 { v.Logger.Debug("no processed epochs to validate", - "app", app.ContractAddress, + "app", app.IApplicationAddress, ) } return nil } +func getOutputsProducedInBlockRange( + ctx context.Context, + vr ValidatorRepository, + address string, + start uint64, + end uint64, +) ([]*Output, error) { + r := repository.Range{Start: start, End: end} + f := repository.OutputFilter{BlockRange: Pointer(r)} + return vr.ListOutputs(ctx, address, f, repository.Pagination{}) +} + // createClaimAndProofs calculates the claim and proofs for an epoch. It returns // the claim and the epoch outputs updated with their hash and proofs. In case // the epoch has no outputs, there are no proofs and it returns the pristine // claim for the first epoch or the previous epoch claim otherwise. 
func (v *Service) createClaimAndProofs( ctx context.Context, - epoch Epoch, -) (*Hash, []Output, error) { - epochOutputs, err := v.repository.GetOutputsProducedInBlockRange( + appAddress string, + epoch *Epoch, +) (*common.Hash, []*Output, error) { + epochOutputs, err := getOutputsProducedInBlockRange( ctx, - epoch.AppAddress, + v.repository, + appAddress, epoch.FirstBlock, epoch.LastBlock, ) if err != nil { return nil, nil, fmt.Errorf( - "failed to get outputs for epoch %v of application %v. %w", - epoch.Index, epoch.AppAddress, err, + "failed to get outputs for epoch %v (%v) of application %v. %w", + epoch.Index, epoch.VirtualIndex, appAddress, err, ) } - previousEpoch, err := v.repository.GetPreviousEpoch(ctx, epoch) - if err != nil { - return nil, nil, fmt.Errorf( - "failed to get previous epoch for epoch %v of application %v. %w", - epoch.Index, epoch.AppAddress, err, - ) + var previousEpoch *Epoch + if epoch.VirtualIndex > 0 { + previousEpoch, err = v.repository.GetEpochByVirtualIndex(ctx, appAddress, epoch.VirtualIndex-1) + if err != nil { + return nil, nil, fmt.Errorf( + "failed to get previous epoch for epoch %v (%v) of application %v. %w", + epoch.Index, epoch.VirtualIndex, appAddress, err, + ) + } } // if there are no outputs @@ -258,39 +293,40 @@ func (v *Service) createClaimAndProofs( claim, _, err := merkle.CreateProofs(nil, MAX_OUTPUT_TREE_HEIGHT) if err != nil { return nil, nil, fmt.Errorf( - "failed to create proofs for epoch %v of application %v. %w", - epoch.Index, epoch.AppAddress, err, + "failed to create proofs for epoch %v (%v) of application %v. 
%w", + epoch.Index, epoch.VirtualIndex, appAddress, err, ) } return &claim, nil, nil } } else { // if epoch has outputs, calculate a new claim and proofs - var previousOutputs []Output + var previousOutputs []*Output if previousEpoch != nil { // get all outputs created before the current epoch - previousOutputs, err = v.repository.GetOutputsProducedInBlockRange( + previousOutputs, err = getOutputsProducedInBlockRange( ctx, - epoch.AppAddress, + v.repository, + appAddress, 0, // Current implementation requires all outputs previousEpoch.LastBlock, ) if err != nil { return nil, nil, fmt.Errorf( "failed to get all outputs of application %v before epoch %d. %w", - epoch.AppAddress, epoch.Index, err, + appAddress, epoch.Index, err, ) } } // the leaves of the Merkle tree are the Keccak256 hashes of all the // outputs - leaves := make([]Hash, 0, len(epochOutputs)+len(previousOutputs)) + leaves := make([]common.Hash, 0, len(epochOutputs)+len(previousOutputs)) for idx := range previousOutputs { if previousOutputs[idx].Hash == nil { // should never happen return nil, nil, fmt.Errorf( "missing hash of output %d from input %d", - previousOutputs[idx].Index, previousOutputs[idx].InputId, + previousOutputs[idx].Index, previousOutputs[idx].InputIndex, ) } leaves = append(leaves, *previousOutputs[idx].Hash) @@ -307,7 +343,7 @@ func (v *Service) createClaimAndProofs( if err != nil { return nil, nil, fmt.Errorf( "failed to create proofs for epoch %d of application %v. %w", - epoch.Index, epoch.AppAddress, err, + epoch.Index, appAddress, err, ) } diff --git a/pkg/emulator/emulator_test.go b/pkg/emulator/emulator_test.go index ef12a30d4..820d85876 100644 --- a/pkg/emulator/emulator_test.go +++ b/pkg/emulator/emulator_test.go @@ -213,7 +213,7 @@ type RemoteMachineSuite struct{ suite.Suite } func (s *RemoteMachineSuite) TestNew() { require := s.Require() - // Launchs the remote server. + // Launches the remote server. 
cmd, serverAddress := launchRemoteServer(s.T()) defer func() { require.Nil(cmd.Process.Kill()) }() @@ -251,7 +251,7 @@ func (s *RemoteMachineSuite) TestNew() { func (s *RemoteMachineSuite) TestHappyPath() { require := s.Require() - // Launchs the remote server. + // Launches the remote server. cmd, serverAddress := launchRemoteServer(s.T()) defer func() { require.Nil(cmd.Process.Kill()) }() diff --git a/pkg/ethutil/ethutil.go b/pkg/ethutil/ethutil.go index e95f808ef..3e05df7d0 100644 --- a/pkg/ethutil/ethutil.go +++ b/pkg/ethutil/ethutil.go @@ -96,12 +96,12 @@ func DeploySelfHostedApplication( func AddInput( ctx context.Context, client *ethclient.Client, - book *addresses.Book, + inputBoxAddress common.Address, application common.Address, signer Signer, input []byte, ) (uint64, uint64, error) { - inputBox, err := iinputbox.NewIInputBox(book.InputBox, client) + inputBox, err := iinputbox.NewIInputBox(inputBoxAddress, client) if err != nil { return 0, 0, fmt.Errorf("failed to connect to InputBox contract: %v", err) } @@ -114,18 +114,18 @@ func AddInput( if err != nil { return 0, 0, err } - index, err := getInputIndex(book, inputBox, receipt) + index, err := getInputIndex(inputBoxAddress, inputBox, receipt) return index, receipt.BlockNumber.Uint64(), nil } // Get input index in the transaction by looking at the event logs. 
func getInputIndex( - book *addresses.Book, + inputBoxAddress common.Address, inputBox *iinputbox.IInputBox, receipt *types.Receipt, ) (uint64, error) { for _, log := range receipt.Logs { - if log.Address != book.InputBox { + if log.Address != inputBoxAddress { continue } inputAdded, err := inputBox.ParseInputAdded(*log) diff --git a/pkg/ethutil/ethutil_test.go b/pkg/ethutil/ethutil_test.go index a9cf301a0..e94266443 100644 --- a/pkg/ethutil/ethutil_test.go +++ b/pkg/ethutil/ethutil_test.go @@ -88,7 +88,7 @@ func (s *EthUtilSuite) TestAddInput() { go func() { waitGroup.Done() - inputIndex, _, err := AddInput(s.ctx, s.client, s.book, s.appAddr, s.signer, payload) + inputIndex, _, err := AddInput(s.ctx, s.client, s.book.InputBox, s.appAddr, s.signer, payload) if err != nil { errChan <- err return diff --git a/pkg/inspectclient/generated.go b/pkg/inspectclient/generated.go index 991f7c097..67167805c 100644 --- a/pkg/inspectclient/generated.go +++ b/pkg/inspectclient/generated.go @@ -137,9 +137,6 @@ func WithRequestEditorFn(fn RequestEditorFn) ClientOption { type ClientInterface interface { // InspectPostWithBody request with any body InspectPostWithBody(ctx context.Context, dapp string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) - - // Inspect request - Inspect(ctx context.Context, dapp string, payload string, reqEditors ...RequestEditorFn) (*http.Response, error) } func (c *Client) InspectPostWithBody(ctx context.Context, dapp string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { @@ -154,18 +151,6 @@ func (c *Client) InspectPostWithBody(ctx context.Context, dapp string, contentTy return c.Client.Do(req) } -func (c *Client) Inspect(ctx context.Context, dapp string, payload string, reqEditors ...RequestEditorFn) (*http.Response, error) { - req, err := NewInspectRequest(c.Server, dapp, payload) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if err := 
c.applyEditors(ctx, req, reqEditors); err != nil { - return nil, err - } - return c.Client.Do(req) -} - // NewInspectPostRequestWithBody generates requests for InspectPost with any type of body func NewInspectPostRequestWithBody(server string, dapp string, contentType string, body io.Reader) (*http.Request, error) { var err error @@ -202,47 +187,6 @@ func NewInspectPostRequestWithBody(server string, dapp string, contentType strin return req, nil } -// NewInspectRequest generates requests for Inspect -func NewInspectRequest(server string, dapp string, payload string) (*http.Request, error) { - var err error - - var pathParam0 string - - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "dapp", runtime.ParamLocationPath, dapp) - if err != nil { - return nil, err - } - - var pathParam1 string - - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "payload", runtime.ParamLocationPath, payload) - if err != nil { - return nil, err - } - - serverURL, err := url.Parse(server) - if err != nil { - return nil, err - } - - operationPath := fmt.Sprintf("inspect/%s/%s", pathParam0, pathParam1) - if operationPath[0] == '/' { - operationPath = "." 
+ operationPath - } - - queryURL, err := serverURL.Parse(operationPath) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", queryURL.String(), nil) - if err != nil { - return nil, err - } - - return req, nil -} - func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { for _, r := range c.RequestEditors { if err := r(ctx, req); err != nil { @@ -288,9 +232,6 @@ func WithBaseURL(baseURL string) ClientOption { type ClientWithResponsesInterface interface { // InspectPostWithBodyWithResponse request with any body InspectPostWithBodyWithResponse(ctx context.Context, dapp string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*InspectPostResponse, error) - - // InspectWithResponse request - InspectWithResponse(ctx context.Context, dapp string, payload string, reqEditors ...RequestEditorFn) (*InspectResponse, error) } type InspectPostResponse struct { @@ -315,28 +256,6 @@ func (r InspectPostResponse) StatusCode() int { return 0 } -type InspectResponse struct { - Body []byte - HTTPResponse *http.Response - JSON200 *InspectResult -} - -// Status returns HTTPResponse.Status -func (r InspectResponse) Status() string { - if r.HTTPResponse != nil { - return r.HTTPResponse.Status - } - return http.StatusText(0) -} - -// StatusCode returns HTTPResponse.StatusCode -func (r InspectResponse) StatusCode() int { - if r.HTTPResponse != nil { - return r.HTTPResponse.StatusCode - } - return 0 -} - // InspectPostWithBodyWithResponse request with arbitrary body returning *InspectPostResponse func (c *ClientWithResponses) InspectPostWithBodyWithResponse(ctx context.Context, dapp string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*InspectPostResponse, error) { rsp, err := c.InspectPostWithBody(ctx, dapp, contentType, body, reqEditors...) 
@@ -346,15 +265,6 @@ func (c *ClientWithResponses) InspectPostWithBodyWithResponse(ctx context.Contex return ParseInspectPostResponse(rsp) } -// InspectWithResponse request returning *InspectResponse -func (c *ClientWithResponses) InspectWithResponse(ctx context.Context, dapp string, payload string, reqEditors ...RequestEditorFn) (*InspectResponse, error) { - rsp, err := c.Inspect(ctx, dapp, payload, reqEditors...) - if err != nil { - return nil, err - } - return ParseInspectResponse(rsp) -} - // ParseInspectPostResponse parses an HTTP response from a InspectPostWithResponse call func ParseInspectPostResponse(rsp *http.Response) (*InspectPostResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -380,29 +290,3 @@ func ParseInspectPostResponse(rsp *http.Response) (*InspectPostResponse, error) return response, nil } - -// ParseInspectResponse parses an HTTP response from a InspectWithResponse call -func ParseInspectResponse(rsp *http.Response) (*InspectResponse, error) { - bodyBytes, err := io.ReadAll(rsp.Body) - defer func() { _ = rsp.Body.Close() }() - if err != nil { - return nil, err - } - - response := &InspectResponse{ - Body: bodyBytes, - HTTPResponse: rsp, - } - - switch { - case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest InspectResult - if err := json.Unmarshal(bodyBytes, &dest); err != nil { - return nil, err - } - response.JSON200 = &dest - - } - - return response, nil -} diff --git a/pkg/rollupsmachine/cartesimachine/server.go b/pkg/rollupsmachine/cartesimachine/server.go index 31869b37c..c192bd371 100644 --- a/pkg/rollupsmachine/cartesimachine/server.go +++ b/pkg/rollupsmachine/cartesimachine/server.go @@ -67,7 +67,7 @@ func StartServer(logger *slog.Logger, verbosity ServerVerbosity, port uint32, st if actualPort := <-interceptor.port; port == 0 { port = actualPort } else if port != actualPort { - panic(fmt.Sprintf("mismatching ports (%d != %d)", port, actualPort)) + return "", 
fmt.Errorf("mismatching ports (%d != %d)", port, actualPort) } return fmt.Sprintf("127.0.0.1:%d", port), nil diff --git a/pkg/rollupsmachine/error.go b/pkg/rollupsmachine/error.go index 9dd9d8095..17fc49cf7 100644 --- a/pkg/rollupsmachine/error.go +++ b/pkg/rollupsmachine/error.go @@ -6,8 +6,6 @@ package rollupsmachine import ( "errors" "fmt" - - "github.com/cartesi/rollups-node/internal/model" ) var ( @@ -25,5 +23,5 @@ var ( ErrNotAtManualYield = errors.New("not at manual yield") // Advance - ErrHashLength = fmt.Errorf("hash does not have exactly %d bytes", model.HashLength) + ErrHashLength = fmt.Errorf("hash does not have the exact number of bytes") ) diff --git a/pkg/service/service.go b/pkg/service/service.go index f10f094f4..56c74bf63 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -46,7 +46,8 @@ // }, // }, &s) // if err != nil { -// panic(err) +// s.Logger.Error("Fatal", "error", err) +// os.Exit(1) // } // s.CreateDefaultHandlers("/" + s.Name) // s.Serve() @@ -274,21 +275,15 @@ func (s *Service) String() string { } func NewLogger(level slog.Level, pretty bool) *slog.Logger { - logger := &slog.Logger{} - if pretty { - opts := &tint.Options{ - Level: level, - AddSource: level == slog.LevelDebug, - // RFC3339 with milliseconds and without timezone - TimeFormat: "2006-01-02T15:04:05.000", - NoColor: !pretty, - } - handler := tint.NewHandler(os.Stdout, opts) - logger = slog.New(handler) - } else { - logger = slog.Default() + opts := &tint.Options{ + Level: level, + AddSource: level == slog.LevelDebug, + // RFC3339 with milliseconds and without timezone + TimeFormat: "2006-01-02T15:04:05.000", + NoColor: !pretty, } - return logger + handler := tint.NewHandler(os.Stdout, opts) + return slog.New(handler) } func WithTimeout(limit time.Duration, fn func() error) error { diff --git a/test/tooling/db/db.go b/test/tooling/db/db.go index 44db648f7..22923ab57 100644 --- a/test/tooling/db/db.go +++ b/test/tooling/db/db.go @@ -7,7 +7,7 @@ import ( 
"fmt" "os" - "github.com/cartesi/rollups-node/internal/repository/schema" + "github.com/cartesi/rollups-node/internal/repository/postgres/schema" ) func GetPostgresTestEndpoint() (string, error) {