From ee9e2e2b2d58c7bde9463ee204aad1c2d0ee5942 Mon Sep 17 00:00:00 2001
From: Morgan
Date: Thu, 25 Apr 2024 17:22:20 +0200
Subject: [PATCH 1/6] feat(gnoland): pass genesis file as a flag (#1972)

- Depends on #1944 (git history is based on that one)
- Closes #1883
Contributors' checklist:

- [ ] Added new tests, or not needed, or not feasible
- [ ] Provided an example (e.g. screenshot) to aid review or the PR is self-explanatory
- [ ] Updated the official documentation or not needed
- [ ] No breaking changes were made, or a `BREAKING CHANGE: xxx` message was included in the description
- [ ] Added references to related issues and PRs
- [ ] Provided any useful hints for running manual tests
- [ ] Added new benchmarks to [generated graphs](https://gnoland.github.io/benchmarks), if any. More info [here](https://github.com/gnolang/gno/blob/master/.benchmarks/README.md).
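As a stand-alone sketch of what the change amounts to (not the node's actual start-up code): the genesis location is now taken from a `--genesis` flag with a `genesis.json` default and resolved to an absolute path before the node starts, instead of coming from the `genesis_file` entry that this patch removes from the TOML config. The snippet only mirrors the flag registration and path resolution visible in the `start.go` diff below.

```go
package main

import (
	"flag"
	"fmt"
	"path/filepath"
)

func main() {
	// Mirrors the new flag in start.go: name "genesis", default "genesis.json".
	genesisFile := flag.String("genesis", "genesis.json", "the path to the genesis.json")
	flag.Parse()

	// The node resolves the flag value to an absolute path before using it.
	genesisPath, err := filepath.Abs(*genesisFile)
	if err != nil {
		panic(fmt.Errorf("unable to get absolute path for the genesis.json, %w", err))
	}

	fmt.Println("genesis file:", genesisPath)
}
```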
--------- Co-authored-by: Milos Zivkovic --- gno.land/cmd/gnoland/config_get_test.go | 7 ------- gno.land/cmd/gnoland/config_set_test.go | 10 ---------- gno.land/cmd/gnoland/start.go | 26 ++++++++++++++++++------- gno.land/cmd/gnoland/start_test.go | 13 ++++++++++++- tm2/pkg/bft/config/config.go | 11 ----------- tm2/pkg/bft/config/config_test.go | 9 --------- tm2/pkg/bft/config/toml.go | 6 +++++- tm2/pkg/bft/config/toml_test.go | 3 +-- tm2/pkg/bft/consensus/replay_file.go | 2 -- tm2/pkg/bft/rpc/test/helpers.go | 6 +++--- tm2/pkg/bft/store/store_test.go | 2 +- 11 files changed, 41 insertions(+), 54 deletions(-) diff --git a/gno.land/cmd/gnoland/config_get_test.go b/gno.land/cmd/gnoland/config_get_test.go index 940842516d0..e8c27045205 100644 --- a/gno.land/cmd/gnoland/config_get_test.go +++ b/gno.land/cmd/gnoland/config_get_test.go @@ -129,13 +129,6 @@ func TestConfig_Get_Base(t *testing.T) { assert.Equal(t, loadedCfg.DBPath, value) }, }, - { - "genesis path fetched", - "genesis_file", - func(loadedCfg *config.Config, value string) { - assert.Equal(t, loadedCfg.Genesis, value) - }, - }, { "validator key fetched", "priv_validator_key_file", diff --git a/gno.land/cmd/gnoland/config_set_test.go b/gno.land/cmd/gnoland/config_set_test.go index 87cfbfdfc4a..b0898194d64 100644 --- a/gno.land/cmd/gnoland/config_set_test.go +++ b/gno.land/cmd/gnoland/config_set_test.go @@ -184,16 +184,6 @@ func TestConfig_Set_Base(t *testing.T) { assert.Equal(t, value, loadedCfg.DBPath) }, }, - { - "genesis path updated", - []string{ - "genesis_file", - "example path", - }, - func(loadedCfg *config.Config, value string) { - assert.Equal(t, value, loadedCfg.Genesis) - }, - }, { "validator key updated", []string{ diff --git a/gno.land/cmd/gnoland/start.go b/gno.land/cmd/gnoland/start.go index 41f4e7c2f44..0cbbc6cb36d 100644 --- a/gno.land/cmd/gnoland/start.go +++ b/gno.land/cmd/gnoland/start.go @@ -43,6 +43,7 @@ type startCfg struct { skipStart bool genesisBalancesFile string genesisTxsFile string + genesisFile string chainID string genesisRemote string dataDir string @@ -106,6 +107,13 @@ func (c *startCfg) RegisterFlags(fs *flag.FlagSet) { "initial txs to replay", ) + fs.StringVar( + &c.genesisFile, + "genesis", + "genesis.json", + "the path to the genesis.json", + ) + fs.StringVar( &c.chainID, "chainid", @@ -200,6 +208,12 @@ func execStart(c *startCfg, io commands.IO) error { return fmt.Errorf("unable to get absolute path for data directory, %w", err) } + // Get the absolute path to the node's genesis.json + genesisPath, err := filepath.Abs(c.genesisFile) + if err != nil { + return fmt.Errorf("unable to get absolute path for the genesis.json, %w", err) + } + var ( cfg *config.Config loadCfgErr error @@ -241,11 +255,9 @@ func execStart(c *startCfg, io commands.IO) error { logger := log.ZapLoggerToSlog(zapLogger) // Write genesis file if missing. - // NOTE: this will be dropped in a PR that resolves issue #1883: - // https://github.com/gnolang/gno/issues/1883 - genesisFilePath := filepath.Join(nodeDir, "../", "genesis.json") - - if !osm.FileExists(genesisFilePath) { + // NOTE: this will be dropped in a PR that resolves issue #1886: + // https://github.com/gnolang/gno/issues/1886 + if !osm.FileExists(genesisPath) { // Create priv validator first. 
// Need it to generate genesis.json newPrivValKey := cfg.PrivValidatorKeyFile() @@ -254,7 +266,7 @@ func execStart(c *startCfg, io commands.IO) error { pk := priv.GetPubKey() // Generate genesis.json file - if err := generateGenesisFile(genesisFilePath, pk, c); err != nil { + if err := generateGenesisFile(genesisPath, pk, c); err != nil { return fmt.Errorf("unable to generate genesis file: %w", err) } } @@ -277,7 +289,7 @@ func execStart(c *startCfg, io commands.IO) error { io.Println(startGraphic) } - gnoNode, err := node.DefaultNewNode(cfg, genesisFilePath, logger) + gnoNode, err := node.DefaultNewNode(cfg, genesisPath, logger) if err != nil { return fmt.Errorf("error in creating node: %w", err) } diff --git a/gno.land/cmd/gnoland/start_test.go b/gno.land/cmd/gnoland/start_test.go index 2f266d8a879..cdec6de0f99 100644 --- a/gno.land/cmd/gnoland/start_test.go +++ b/gno.land/cmd/gnoland/start_test.go @@ -3,6 +3,7 @@ package main import ( "bytes" "context" + "path/filepath" "testing" "time" @@ -14,15 +15,24 @@ import ( func TestStartInitialize(t *testing.T) { t.Parallel() + // NOTE: cannot be txtar tests as they use their own parsing for the + // "gnoland" command line. See pkg/integration. + var ( - nodeDir = t.TempDir() + nodeDir = t.TempDir() + genesisFile = filepath.Join(nodeDir, "test_genesis.json") args = []string{ "start", "--skip-start", "--skip-failing-genesis-txs", + + // These two flags are tested together as they would otherwise + // pollute this directory (cmd/gnoland) if not set. "--data-dir", nodeDir, + "--genesis", + genesisFile, } ) @@ -42,4 +52,5 @@ func TestStartInitialize(t *testing.T) { // Make sure the directory is created assert.DirExists(t, nodeDir) + assert.FileExists(t, genesisFile) } diff --git a/tm2/pkg/bft/config/config.go b/tm2/pkg/bft/config/config.go index 8668dde5003..117ce36e96b 100644 --- a/tm2/pkg/bft/config/config.go +++ b/tm2/pkg/bft/config/config.go @@ -22,7 +22,6 @@ var ( errInvalidMoniker = errors.New("moniker not set") errInvalidDBBackend = errors.New("invalid DB backend") errInvalidDBPath = errors.New("invalid DB path") - errInvalidGenesisPath = errors.New("invalid genesis path") errInvalidPrivValidatorKeyPath = errors.New("invalid private validator key path") errInvalidPrivValidatorStatePath = errors.New("invalid private validator state file path") errInvalidABCIMechanism = errors.New("invalid ABCI mechanism") @@ -205,7 +204,6 @@ var ( defaultSecretsDir = "secrets" defaultConfigFileName = "config.toml" - defaultGenesisJSONName = "genesis.json" defaultNodeKeyName = "node_key.json" defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" @@ -271,9 +269,6 @@ type BaseConfig struct { // Database directory DBPath string `toml:"db_dir" comment:"Database directory"` - // Path to the JSON file containing the initial validator set and other meta data - Genesis string `toml:"genesis_file" comment:"Path to the JSON file containing the initial validator set and other meta data"` - // Path to the JSON file containing the private key to use as a validator in the consensus protocol PrivValidatorKey string `toml:"priv_validator_key_file" comment:"Path to the JSON file containing the private key to use as a validator in the consensus protocol"` @@ -301,7 +296,6 @@ type BaseConfig struct { // DefaultBaseConfig returns a default base configuration for a Tendermint node func DefaultBaseConfig() BaseConfig { return BaseConfig{ - Genesis: defaultGenesisJSONName, PrivValidatorKey: defaultPrivValKeyPath, PrivValidatorState: 
defaultPrivValStatePath, NodeKey: defaultNodeKeyPath, @@ -382,11 +376,6 @@ func (cfg BaseConfig) ValidateBasic() error { return errInvalidDBPath } - // Verify the genesis path is set - if cfg.Genesis == "" { - return errInvalidGenesisPath - } - // Verify the validator private key path is set if cfg.PrivValidatorKey == "" { return errInvalidPrivValidatorKeyPath diff --git a/tm2/pkg/bft/config/config_test.go b/tm2/pkg/bft/config/config_test.go index e1e439ac9c0..541b5591985 100644 --- a/tm2/pkg/bft/config/config_test.go +++ b/tm2/pkg/bft/config/config_test.go @@ -128,15 +128,6 @@ func TestConfig_ValidateBaseConfig(t *testing.T) { assert.ErrorIs(t, c.BaseConfig.ValidateBasic(), errInvalidDBPath) }) - t.Run("genesis path not set", func(t *testing.T) { - t.Parallel() - - c := DefaultConfig() - c.Genesis = "" - - assert.ErrorIs(t, c.BaseConfig.ValidateBasic(), errInvalidGenesisPath) - }) - t.Run("priv validator key path not set", func(t *testing.T) { t.Parallel() diff --git a/tm2/pkg/bft/config/toml.go b/tm2/pkg/bft/config/toml.go index 474fce0e8a3..5d8589394a0 100644 --- a/tm2/pkg/bft/config/toml.go +++ b/tm2/pkg/bft/config/toml.go @@ -81,7 +81,11 @@ func ResetTestRoot(testName string) (*Config, string) { baseConfig := DefaultBaseConfig() configFilePath := filepath.Join(rootDir, defaultConfigPath) - genesisFilePath := filepath.Join(rootDir, defaultGenesisJSONName) + // NOTE: this does not match the behaviour of the Gno.land node. + // However, many tests rely on the fact that they can cleanup the directory + // by doing RemoveAll on the rootDir; so to keep compatibility with that + // behaviour, we place genesis.json in the rootDir. + genesisFilePath := filepath.Join(rootDir, "genesis.json") privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey) privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState) diff --git a/tm2/pkg/bft/config/toml_test.go b/tm2/pkg/bft/config/toml_test.go index 53dea1f2dfe..3520bcebc9f 100644 --- a/tm2/pkg/bft/config/toml_test.go +++ b/tm2/pkg/bft/config/toml_test.go @@ -68,8 +68,8 @@ func TestEnsureTestRoot(t *testing.T) { ensureFiles( t, rootDir, + "genesis.json", DefaultDBDir, - baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState, ) @@ -93,7 +93,6 @@ func checkConfig(configFile string) bool { "wal", "propose", "max", - "genesis", } for _, e := range elems { if !strings.Contains(configFile, e) { diff --git a/tm2/pkg/bft/consensus/replay_file.go b/tm2/pkg/bft/consensus/replay_file.go index 03ddb2d2722..701c2893053 100644 --- a/tm2/pkg/bft/consensus/replay_file.go +++ b/tm2/pkg/bft/consensus/replay_file.go @@ -249,5 +249,3 @@ func (pb *playback) replayConsoleLoop() int { } } } - -// -------------------------------------------------------------------------------- diff --git a/tm2/pkg/bft/rpc/test/helpers.go b/tm2/pkg/bft/rpc/test/helpers.go index 39078eaaf7f..d934cf27a64 100644 --- a/tm2/pkg/bft/rpc/test/helpers.go +++ b/tm2/pkg/bft/rpc/test/helpers.go @@ -92,7 +92,7 @@ func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { for _, opt := range opts { opt(&nodeOpts) } - node := NewTendermint(app, &nodeOpts) + node := newTendermint(app, &nodeOpts) err := node.Start() if err != nil { panic(err) @@ -112,8 +112,8 @@ func StopTendermint(node *nm.Node) { os.RemoveAll(node.Config().RootDir) } -// NewTendermint creates a new tendermint server and sleeps forever -func NewTendermint(app abci.Application, opts *Options) *nm.Node { +// newTendermint creates a new tendermint server and sleeps forever 
+func newTendermint(app abci.Application, opts *Options) *nm.Node { // Create & start node config, genesisFile := GetConfig(opts.recreateConfig) diff --git a/tm2/pkg/bft/store/store_test.go b/tm2/pkg/bft/store/store_test.go index 6ea85592ac9..2e634681ecc 100644 --- a/tm2/pkg/bft/store/store_test.go +++ b/tm2/pkg/bft/store/store_test.go @@ -45,7 +45,7 @@ func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Bl return block } -func makeStateAndBlockStore(logger *slog.Logger) (sm.State, *BlockStore, cleanupFunc) { +func makeStateAndBlockStore(_ *slog.Logger) (sm.State, *BlockStore, cleanupFunc) { config, genesisFile := cfg.ResetTestRoot("blockchain_reactor_test") // blockDB := dbm.NewDebugDB("blockDB", memdb.NewMemDB()) // stateDB := dbm.NewDebugDB("stateDB", memdb.NewMemDB()) From 6c8f4552acabb4ef449169798e6d146aef97fb42 Mon Sep 17 00:00:00 2001 From: Guilhem Fanton <8671905+gfanton@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:43:54 +0200 Subject: [PATCH 2/6] fix: keep data as default gnohome keybase db (#1984) --- contribs/gnokeykc/go.mod | 1 - tm2/pkg/crypto/keys/utils.go | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contribs/gnokeykc/go.mod b/contribs/gnokeykc/go.mod index 06e4db7a355..c0b4a874576 100644 --- a/contribs/gnokeykc/go.mod +++ b/contribs/gnokeykc/go.mod @@ -10,7 +10,6 @@ require ( ) require ( - dario.cat/mergo v1.0.0 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect github.com/btcsuite/btcd/btcutil v1.1.5 // indirect diff --git a/tm2/pkg/crypto/keys/utils.go b/tm2/pkg/crypto/keys/utils.go index c848fe574bd..21ec1b5f491 100644 --- a/tm2/pkg/crypto/keys/utils.go +++ b/tm2/pkg/crypto/keys/utils.go @@ -3,15 +3,16 @@ package keys import ( "fmt" "path/filepath" - - "github.com/gnolang/gno/tm2/pkg/bft/config" ) -const defaultKeyDBName = "keys" +const ( + defaultKeyDBName = "keys" + defaultKeyDBDir = "data" +) // NewKeyBaseFromDir initializes a keybase at a particular dir. func NewKeyBaseFromDir(rootDir string) (Keybase, error) { - return NewLazyDBKeybase(defaultKeyDBName, filepath.Join(rootDir, config.DefaultDBDir)), nil + return NewLazyDBKeybase(defaultKeyDBName, filepath.Join(rootDir, defaultKeyDBDir)), nil } func ValidateMultisigThreshold(k, nKeys int) error { From 0ba95bf33b2334974ccada7189b2f67702e16937 Mon Sep 17 00:00:00 2001 From: Manfred Touron <94029+moul@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:24:24 +0200 Subject: [PATCH 3/6] chore: archive old demo (#1940) --- examples/gno.land/p/{demo => archives}/bank/gno.mod | 0 examples/gno.land/p/{demo => archives}/bank/types.gno | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename examples/gno.land/p/{demo => archives}/bank/gno.mod (100%) rename examples/gno.land/p/{demo => archives}/bank/types.gno (100%) diff --git a/examples/gno.land/p/demo/bank/gno.mod b/examples/gno.land/p/archives/bank/gno.mod similarity index 100% rename from examples/gno.land/p/demo/bank/gno.mod rename to examples/gno.land/p/archives/bank/gno.mod diff --git a/examples/gno.land/p/demo/bank/types.gno b/examples/gno.land/p/archives/bank/types.gno similarity index 100% rename from examples/gno.land/p/demo/bank/types.gno rename to examples/gno.land/p/archives/bank/types.gno From 8cc56366f21c3165f31b387ab1092230ce4643da Mon Sep 17 00:00:00 2001 From: piux2 <90544084+piux2@users.noreply.github.com> Date: Thu, 25 Apr 2024 15:58:13 -0600 Subject: [PATCH 4/6] fix: consolidate vm gas consumption (#1430)
Contributors' checklist:

- [x] Added new tests
- [ ] Provided an example (e.g. screenshot) to aid review or the PR is self-explanatory
- [ ] Updated the official documentation or not needed
- [x] No breaking changes were made
- [x] Added references to related issues and PRs
- [ ] Provided any useful hints for running manual tests
- [ ] Added new benchmarks to [generated graphs](https://gnoland.github.io/benchmarks), if any. More info [here](https://github.com/gnolang/gno/blob/master/.benchmarks/README.md).
Ref: #1070 #1067 #649 #1281

## Summary

The current gno.land node, optimized for development purposes, has a simplified verification process and gas meter implementation. To transition the gno.land node to a production-ready state, it is necessary to implement a comprehensive gas metering system that accurately accounts for VM gas consumption. This includes refining the gas fee structure to encompass all relevant costs, ensuring robust transaction validation, and calculating gas consumption based on actual computational load. This PR addresses these limitations by introducing a complete gas meter and validation flow, laying the groundwork for further gas meter profiling and configuration.

## Problem Definition

Current state and limitations for production:

- **VM gas consumption not accounted for in the gas meter:** The current gas meter does not measure VM gas consumption, potentially allowing heavy contract loads without corresponding gas meter deductions. A refined system should measure and charge for VM gas usage accurately.
- **Gas fee structure:** Presently, the gas fee structure only covers storage access, transaction size, and signature verification. VM gas fees are levied as a separate, flat fee, which can confuse users who expect the total fee to match the amount specified in the 'gas-fee' argument. For improved transparency and precision, the gas fee structure should integrate all of these costs.
- **Transaction validation:** The system currently validates only basic information for the VM msg_addpkg and msg_call messages. However, gas consumption cannot be determined before fully executing these messages against the VM. Consequently, VM transactions are placed in the mempool and propagated to other nodes even if they do not carry enough gas to execute. This undermines the purpose of using gas fees to prevent VM spamming.

## Solution (updated)

This is a high-level description of the implemented features:

~~Added an anteHandler in VM to monitor gas consumption~~
~~Implemented chained VM anteHandler in auth.anteHandler~~

- Consume gas to verify the account, signature, and tx size in CheckTx.
- Consume VM gas in DeliverTx.
- Accumulated VM CPU cycles, memory allocation, store access, transaction size, and signature verification into a single gas meter.
- Enabled local node checks of VM resource usage. A VM message is aborted only if it runs out of gas in the basic CheckTx; if execution fails, the message is still propagated to other nodes, to prevent censorship.
- Introduced a structured format for logging gas consumption, for profiling and metrics.
- Introduced a gas factor linking gas to VM CPU cycles and memory allocation, to balance VM gas consumption against the other gas costs.

## Trade-offs and Future Optimization (updated)

~~The current implementation processes messages against the VM to check gas consumption in abci.CheckTx() before inclusion in the mempool and propagation to other nodes.~~
~~Messages lacking sufficient gas-wanted will be dropped, preventing abuse without adequate gas fees. However, the trade-off is that for each message with enough gas, the VM executes the transaction twice: once in CheckTx() and once in DeliverTx(). As these occur in separate execution contexts and are not in synchronized sequence, the performance impact is currently a secondary concern.~~

We moved the VM gas check from CheckTx to DeliverTx for the following reasons (continued after the short sketch below):

- We only know the VM gas consumption after the messages have been processed.
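A minimal, self-contained sketch of the cycles-to-gas conversion this patch introduces. The real implementation is the `GasFactorCPU`/`incrCPU` change in `gnovm/pkg/gnolang/machine.go` further down; this toy only illustrates the overflow-safe linear factor, and the cycle counts are made up.

```go
package main

import (
	"fmt"

	"github.com/gnolang/overflow"
)

// The patch sets a 1:1 factor between gas and VM CPU cycles for now;
// it is expected to be tuned later based on profiling.
const gasFactorCPU int64 = 1

// cyclesToGas converts a batch of VM CPU cycles into gas units using an
// overflow-safe multiplication (Mul64p panics on int64 overflow), the same
// helper the patch uses in incrCPU and in the gas store.
func cyclesToGas(cycles int64) int64 {
	return overflow.Mul64p(cycles, gasFactorCPU)
}

func main() {
	var total int64
	for _, cycles := range []int64{1_000, 2_500, 40_000} { // made-up cycle counts
		total += cyclesToGas(cycles)
	}
	fmt.Println("gas charged for CPU cycles:", total)
}
```

The remaining reasons for moving the check: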
- Running VM execution for many CheckTx requests from the peers could overload the mempool that is executing CheckTx. - This could slow down the propagation of transactions across the entire network. By moving the VM gas check from CheckTx to DeliverTx, we are able to reduce the load on the mempool of a node and allow transactions to propagate through the network faster. In the future, we may use a predicted median value instead of the exact value from transaction execution for efficiency. ## What's Next: - Add a minimum gas price flag and configuration for node operation. - Provide a user-friendly fee input interface, offering 'gas-wanted' and 'gas price' as alternatives to the current 'gas-wanted' and 'gas-fee' inputs. - Tune the gas factor based on VM CPU and Memory Profiling. The current factor is 1:1 between gas and VM CPU cycles and memory allocation. --------- Co-authored-by: Thomas Bruyelle --- gno.land/cmd/gnoland/testdata/addpkg.txtar | 40 +++-- gno.land/pkg/gnoland/app.go | 2 +- gno.land/pkg/sdk/vm/gas_test.go | 177 +++++++++++++++++++++ gno.land/pkg/sdk/vm/handler.go | 33 +--- gno.land/pkg/sdk/vm/keeper.go | 139 ++++++++++------ gnovm/pkg/gnolang/alloc.go | 1 + gnovm/pkg/gnolang/machine.go | 22 ++- tm2/pkg/sdk/auth/ante.go | 4 + tm2/pkg/store/gas/store.go | 14 +- 9 files changed, 332 insertions(+), 100 deletions(-) create mode 100644 gno.land/pkg/sdk/vm/gas_test.go diff --git a/gno.land/cmd/gnoland/testdata/addpkg.txtar b/gno.land/cmd/gnoland/testdata/addpkg.txtar index e7437552b50..071096cb49d 100644 --- a/gno.land/cmd/gnoland/testdata/addpkg.txtar +++ b/gno.land/cmd/gnoland/testdata/addpkg.txtar @@ -3,21 +3,35 @@ ## start a new node gnoland start -## add bar.gno package located in $WORK directory as gno.land/r/foobar/bar -gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/foobar/bar -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test test1 +## add hello.gno package located in $WORK directory as gno.land/r/hello +gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/hello -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test test1 -## execute Render -gnokey maketx call -pkgpath gno.land/r/foobar/bar -func Render -gas-fee 1000000ugnot -gas-wanted 2000000 -args '' -broadcast -chainid=tendermint_test test1 +## compare AddPkg +cmp stdout stdout.addpkg.success -## compare render -stdout '("hello from foo" string)' -stdout 'OK!' -stdout 'GAS WANTED: 2000000' -stdout 'GAS USED: [0-9]+' --- bar.gno -- -package bar +## execute SayHello +gnokey maketx call -pkgpath gno.land/r/hello -func SayHello -gas-fee 1000000ugnot -gas-wanted 2000000 -broadcast -chainid=tendermint_test test1 -func Render(path string) string { - return "hello from foo" + +## compare SayHello +cmp stdout stdout.call.success + +-- hello.gno -- +package hello + +func SayHello() string { + return "hello world!" } + + +-- stdout.addpkg.success -- + +OK! +GAS WANTED: 2000000 +GAS USED: 119829 +-- stdout.call.success -- +("hello world!" string) +OK! +GAS WANTED: 2000000 +GAS USED: 52801 diff --git a/gno.land/pkg/gnoland/app.go b/gno.land/pkg/gnoland/app.go index ee007c058a1..03222c7e672 100644 --- a/gno.land/pkg/gnoland/app.go +++ b/gno.land/pkg/gnoland/app.go @@ -66,6 +66,7 @@ func NewAppWithOptions(cfg *AppOptions) (abci.Application, error) { baseKey := store.NewStoreKey("base") // Create BaseApp. 
+ // TODO: Add a consensus based min gas prices for the node, by default it does not check baseApp := sdk.NewBaseApp("gnoland", cfg.Logger, cfg.DB, baseKey, mainKey) baseApp.SetAppVersion("dev") @@ -139,7 +140,6 @@ func NewApp(dataRootDir string, skipFailingGenesisTxs bool, logger *slog.Logger, } cfg.Logger = logger - return NewAppWithOptions(cfg) } diff --git a/gno.land/pkg/sdk/vm/gas_test.go b/gno.land/pkg/sdk/vm/gas_test.go new file mode 100644 index 00000000000..75d13aa6c5d --- /dev/null +++ b/gno.land/pkg/sdk/vm/gas_test.go @@ -0,0 +1,177 @@ +package vm + +import ( + "testing" + + bft "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/sdk" + "github.com/gnolang/gno/tm2/pkg/sdk/auth" + "github.com/gnolang/gno/tm2/pkg/std" + "github.com/gnolang/gno/tm2/pkg/store" + "github.com/stretchr/testify/assert" +) + +// Gas for entire tx is consumed in both CheckTx and DeliverTx. +// Gas for executing VM tx (VM CPU and Store Access in bytes) is consumed in DeliverTx. +// Gas for balance checking, message size checking, and signature verification is consumed (deducted) in checkTx. + +// Insufficient gas for a successful message. + +func TestAddPkgDeliverTxInsuffGas(t *testing.T) { + success := true + ctx, tx, vmHandler := setupAddPkg(success) + + ctx = ctx.WithMode(sdk.RunTxModeDeliver) + simulate := false + tx.Fee.GasWanted = 3000 + gctx := auth.SetGasMeter(simulate, ctx, tx.Fee.GasWanted) + + var res sdk.Result + abort := false + + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case store.OutOfGasException: + res.Error = sdk.ABCIError(std.ErrOutOfGas("")) + abort = true + default: + t.Errorf("should panic on OutOfGasException only") + } + assert.True(t, abort) + assert.False(t, res.IsOK()) + gasCheck := gctx.GasMeter().GasConsumed() + assert.Equal(t, int64(3231), gasCheck) + } else { + t.Errorf("should panic") + } + }() + msgs := tx.GetMsgs() + res = vmHandler.Process(gctx, msgs[0]) +} + +// Enough gas for a successful message. +func TestAddPkgDeliverTx(t *testing.T) { + success := true + ctx, tx, vmHandler := setupAddPkg(success) + + var simulate bool + + ctx = ctx.WithMode(sdk.RunTxModeDeliver) + simulate = false + tx.Fee.GasWanted = 500000 + gctx := auth.SetGasMeter(simulate, ctx, tx.Fee.GasWanted) + msgs := tx.GetMsgs() + res := vmHandler.Process(gctx, msgs[0]) + gasDeliver := gctx.GasMeter().GasConsumed() + + assert.True(t, res.IsOK()) + assert.Equal(t, int64(87809), gasDeliver) +} + +// Enough gas for a failed transaction. +func TestAddPkgDeliverTxFailed(t *testing.T) { + success := false + ctx, tx, vmHandler := setupAddPkg(success) + + var simulate bool + + ctx = ctx.WithMode(sdk.RunTxModeDeliver) + simulate = false + tx.Fee.GasWanted = 500000 + gctx := auth.SetGasMeter(simulate, ctx, tx.Fee.GasWanted) + msgs := tx.GetMsgs() + res := vmHandler.Process(gctx, msgs[0]) + gasDeliver := gctx.GasMeter().GasConsumed() + + assert.False(t, res.IsOK()) + assert.Equal(t, int64(17989), gasDeliver) +} + +// Not enough gas for a failed transaction. 
+func TestAddPkgDeliverTxFailedNoGas(t *testing.T) { + success := false + ctx, tx, vmHandler := setupAddPkg(success) + + var simulate bool + + ctx = ctx.WithMode(sdk.RunTxModeDeliver) + simulate = false + tx.Fee.GasWanted = 17988 + gctx := auth.SetGasMeter(simulate, ctx, tx.Fee.GasWanted) + + var res sdk.Result + abort := false + + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case store.OutOfGasException: + res.Error = sdk.ABCIError(std.ErrOutOfGas("")) + abort = true + default: + t.Errorf("should panic on OutOfGasException only") + } + assert.True(t, abort) + assert.False(t, res.IsOK()) + gasCheck := gctx.GasMeter().GasConsumed() + assert.Equal(t, int64(17989), gasCheck) + } else { + t.Errorf("should panic") + } + }() + + msgs := tx.GetMsgs() + res = vmHandler.Process(gctx, msgs[0]) +} + +// Set up a test env for both a successful and a failed tx +func setupAddPkg(success bool) (sdk.Context, sdk.Tx, vmHandler) { + // setup + env := setupTestEnv() + ctx := env.ctx + // conduct base gas meter tests from a non-genesis block since genesis block use infinite gas meter instead. + ctx = ctx.WithBlockHeader(&bft.Header{Height: int64(1)}) + vmHandler := NewHandler(env.vmk) + // Create an account with 10M ugnot (10gnot) + addr := crypto.AddressFromPreimage([]byte("test1")) + acc := env.acck.NewAccountWithAddress(ctx, addr) + env.acck.SetAccount(ctx, acc) + env.bank.SetCoins(ctx, addr, std.MustParseCoins("10000000ugnot")) + // success message + var files []*std.MemFile + if success { + files = []*std.MemFile{ + { + Name: "hello.gno", + Body: `package hello + +func Echo() string { + return "hello world" +}`, + }, + } + } else { + // failed message + files = []*std.MemFile{ + { + Name: "hello.gno", + Body: `package hello + +func Echo() UnknowType { + return "hello world" +}`, + }, + } + } + + pkgPath := "gno.land/r/hello" + // create messages and a transaction + msg := NewMsgAddPackage(addr, pkgPath, files) + msgs := []std.Msg{msg} + fee := std.NewFee(500000, std.MustParseCoin("1ugnot")) + tx := std.NewTx(msgs, fee, []std.Signature{}, "") + + return ctx, tx, vmHandler +} diff --git a/gno.land/pkg/sdk/vm/handler.go b/gno.land/pkg/sdk/vm/handler.go index 6c3a97696d6..e1dd31846e7 100644 --- a/gno.land/pkg/sdk/vm/handler.go +++ b/gno.land/pkg/sdk/vm/handler.go @@ -6,7 +6,6 @@ import ( abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" "github.com/gnolang/gno/tm2/pkg/sdk" - "github.com/gnolang/gno/tm2/pkg/sdk/auth" "github.com/gnolang/gno/tm2/pkg/std" ) @@ -37,15 +36,7 @@ func (vh vmHandler) Process(ctx sdk.Context, msg std.Msg) sdk.Result { // Handle MsgAddPackage. func (vh vmHandler) handleMsgAddPackage(ctx sdk.Context, msg MsgAddPackage) sdk.Result { - amount, err := std.ParseCoins("1000000ugnot") // XXX calculate - if err != nil { - return abciResult(err) - } - err = vh.vm.bank.SendCoins(ctx, msg.Creator, auth.FeeCollectorAddress(), amount) - if err != nil { - return abciResult(err) - } - err = vh.vm.AddPackage(ctx, msg) + err := vh.vm.AddPackage(ctx, msg) if err != nil { return abciResult(err) } @@ -54,16 +45,7 @@ func (vh vmHandler) handleMsgAddPackage(ctx sdk.Context, msg MsgAddPackage) sdk. // Handle MsgCall. 
func (vh vmHandler) handleMsgCall(ctx sdk.Context, msg MsgCall) (res sdk.Result) { - amount, err := std.ParseCoins("1000000ugnot") // XXX calculate - if err != nil { - return abciResult(err) - } - err = vh.vm.bank.SendCoins(ctx, msg.Caller, auth.FeeCollectorAddress(), amount) - if err != nil { - return abciResult(err) - } - resstr := "" - resstr, err = vh.vm.Call(ctx, msg) + resstr, err := vh.vm.Call(ctx, msg) if err != nil { return abciResult(err) } @@ -81,16 +63,7 @@ func (vh vmHandler) handleMsgCall(ctx sdk.Context, msg MsgCall) (res sdk.Result) // Handle MsgRun. func (vh vmHandler) handleMsgRun(ctx sdk.Context, msg MsgRun) (res sdk.Result) { - amount, err := std.ParseCoins("1000000ugnot") // XXX calculate - if err != nil { - return abciResult(err) - } - err = vh.vm.bank.SendCoins(ctx, msg.Caller, auth.FeeCollectorAddress(), amount) - if err != nil { - return abciResult(err) - } - resstr := "" - resstr, err = vh.vm.Run(ctx, msg) + resstr, err := vh.vm.Run(ctx, msg) if err != nil { return abciResult(err) } diff --git a/gno.land/pkg/sdk/vm/keeper.go b/gno.land/pkg/sdk/vm/keeper.go index e9223143bf5..cbeee69d938 100644 --- a/gno.land/pkg/sdk/vm/keeper.go +++ b/gno.land/pkg/sdk/vm/keeper.go @@ -96,13 +96,13 @@ func (vm *VMKeeper) Initialize(ms store.MultiStore) { } func (vm *VMKeeper) getGnoStore(ctx sdk.Context) gno.Store { - // construct main gnoStore if nil. + // construct main store if nil. if vm.gnoStore == nil { panic("VMKeeper must first be initialized") } switch ctx.Mode() { case sdk.RunTxModeDeliver: - // swap sdk store of existing gnoStore. + // swap sdk store of existing store. // this is needed due to e.g. gas wrappers. baseSDKStore := ctx.Store(vm.baseKey) iavlSDKStore := ctx.Store(vm.iavlKey) @@ -134,12 +134,12 @@ func (vm *VMKeeper) getGnoStore(ctx sdk.Context) gno.Store { var reRunPath = regexp.MustCompile(`gno\.land/r/g[a-z0-9]+/run`) // AddPackage adds a package with given fileset. -func (vm *VMKeeper) AddPackage(ctx sdk.Context, msg MsgAddPackage) error { +func (vm *VMKeeper) AddPackage(ctx sdk.Context, msg MsgAddPackage) (err error) { creator := msg.Creator pkgPath := msg.Package.Path memPkg := msg.Package deposit := msg.Deposit - store := vm.getGnoStore(ctx) + gnostore := vm.getGnoStore(ctx) // Validate arguments. if creator.IsZero() { @@ -152,7 +152,7 @@ func (vm *VMKeeper) AddPackage(ctx sdk.Context, msg MsgAddPackage) error { if err := msg.Package.Validate(); err != nil { return ErrInvalidPkgPath(err.Error()) } - if pv := store.GetPackage(pkgPath, false); pv != nil { + if pv := gnostore.GetPackage(pkgPath, false); pv != nil { return ErrInvalidPkgPath("package already exists: " + pkgPath) } @@ -170,7 +170,7 @@ func (vm *VMKeeper) AddPackage(ctx sdk.Context, msg MsgAddPackage) error { // - check if caller is in Admins or Editors. // - check if namespace is not in pause. 
- err := vm.bank.SendCoins(ctx, creator, pkgAddr, deposit) + err = vm.bank.SendCoins(ctx, creator, pkgAddr, deposit) if err != nil { return err } @@ -192,27 +192,39 @@ func (vm *VMKeeper) AddPackage(ctx sdk.Context, msg MsgAddPackage) error { gno.MachineOptions{ PkgPath: "", Output: os.Stdout, // XXX - Store: store, - Alloc: store.GetAllocator(), + Store: gnostore, + Alloc: gnostore.GetAllocator(), Context: msgCtx, MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) defer m2.Release() + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM addpkg panic: %v\n%s\n", + r, m2.String()) + return + } + } + }() m2.RunMemPackage(memPkg, true) - return nil } -// Calls calls a public Gno function (for delivertx). +// Call calls a public Gno function (for delivertx). func (vm *VMKeeper) Call(ctx sdk.Context, msg MsgCall) (res string, err error) { pkgPath := msg.PkgPath // to import fnc := msg.Func - store := vm.getGnoStore(ctx) + gnostore := vm.getGnoStore(ctx) // Get the package and function type. - pv := store.GetPackage(pkgPath, false) + pv := gnostore.GetPackage(pkgPath, false) pl := gno.PackageNodeLocation(pkgPath) - pn := store.GetBlockNode(pl).(*gno.PackageNode) - ft := pn.GetStaticTypeOf(store, gno.Name(fnc)).(*gno.FuncType) + pn := gnostore.GetBlockNode(pl).(*gno.PackageNode) + ft := pn.GetStaticTypeOf(gnostore, gno.Name(fnc)).(*gno.FuncType) // Make main Package with imports. mpn := gno.NewPackageNode("main", "main", nil) mpn.Define("pkg", gno.TypedValue{T: &gno.PackageType{}, V: pv}) @@ -269,22 +281,27 @@ func (vm *VMKeeper) Call(ctx sdk.Context, msg MsgCall) (res string, err error) { gno.MachineOptions{ PkgPath: "", Output: os.Stdout, // XXX - Store: store, + Store: gnostore, Context: msgCtx, - Alloc: store.GetAllocator(), + Alloc: gnostore.GetAllocator(), MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) + defer m.Release() m.SetActivePackage(mpv) defer func() { if r := recover(); r != nil { - err = errors.Wrap(fmt.Errorf("%v", r), "VM call panic: %v\n%s\n", - r, m.String()) - return + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM call panic: %v\n%s\n", + r, m.String()) + return + } } - m.Release() }() rtvs := m.Eval(xn) - for i, rtv := range rtvs { res = res + rtv.String() if i < len(rtvs)-1 { @@ -299,7 +316,7 @@ func (vm *VMKeeper) Call(ctx sdk.Context, msg MsgCall) (res string, err error) { func (vm *VMKeeper) Run(ctx sdk.Context, msg MsgRun) (res string, err error) { caller := msg.Caller pkgAddr := caller - store := vm.getGnoStore(ctx) + gnostore := vm.getGnoStore(ctx) send := msg.Send memPkg := msg.Package @@ -341,34 +358,54 @@ func (vm *VMKeeper) Run(ctx sdk.Context, msg MsgRun) (res string, err error) { gno.MachineOptions{ PkgPath: "", Output: buf, - Store: store, - Alloc: store.GetAllocator(), + Store: gnostore, + Alloc: gnostore.GetAllocator(), Context: msgCtx, MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) + // XXX MsgRun does not have pkgPath. How do we find it on chain? 
defer m.Release() + defer func() { + if r := recover(); r != nil { + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM run main addpkg panic: %v\n%s\n", + r, m.String()) + return + } + } + }() + _, pv := m.RunMemPackage(memPkg, false) m2 := gno.NewMachineWithOptions( gno.MachineOptions{ PkgPath: "", Output: buf, - Store: store, - Alloc: store.GetAllocator(), + Store: gnostore, + Alloc: gnostore.GetAllocator(), Context: msgCtx, MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) + defer m2.Release() m2.SetActivePackage(pv) defer func() { if r := recover(); r != nil { - err = errors.Wrap(fmt.Errorf("%v", r), "VM call panic: %v\n%s\n", - r, m2.String()) - return + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM run main call panic: %v\n%s\n", + r, m2.String()) + return + } } - m2.Release() }() m2.RunMain() - res = buf.String() return res, nil } @@ -438,10 +475,10 @@ func (vm *VMKeeper) QueryFuncs(ctx sdk.Context, pkgPath string) (fsigs FunctionS // TODO: then, rename to "Eval". func (vm *VMKeeper) QueryEval(ctx sdk.Context, pkgPath string, expr string) (res string, err error) { alloc := gno.NewAllocator(maxAllocQuery) - store := vm.getGnoStore(ctx) + gnostore := vm.getGnoStore(ctx) pkgAddr := gno.DerivePkgAddr(pkgPath) // Get Package. - pv := store.GetPackage(pkgPath, false) + pv := gnostore.GetPackage(pkgPath, false) if pv == nil { err = ErrInvalidPkgPath(fmt.Sprintf( "package not found: %s", pkgPath)) @@ -468,18 +505,24 @@ func (vm *VMKeeper) QueryEval(ctx sdk.Context, pkgPath string, expr string) (res gno.MachineOptions{ PkgPath: pkgPath, Output: os.Stdout, // XXX - Store: store, + Store: gnostore, Context: msgCtx, Alloc: alloc, MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) + defer m.Release() defer func() { if r := recover(); r != nil { - err = errors.Wrap(fmt.Errorf("%v", r), "VM query eval panic: %v\n%s\n", - r, m.String()) - return + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM query eval panic: %v\n%s\n", + r, m.String()) + return + } } - m.Release() }() rtvs := m.Eval(xx) res = "" @@ -498,10 +541,10 @@ func (vm *VMKeeper) QueryEval(ctx sdk.Context, pkgPath string, expr string) (res // TODO: then, rename to "EvalString". func (vm *VMKeeper) QueryEvalString(ctx sdk.Context, pkgPath string, expr string) (res string, err error) { alloc := gno.NewAllocator(maxAllocQuery) - store := vm.getGnoStore(ctx) + gnostore := vm.getGnoStore(ctx) pkgAddr := gno.DerivePkgAddr(pkgPath) // Get Package. 
- pv := store.GetPackage(pkgPath, false) + pv := gnostore.GetPackage(pkgPath, false) if pv == nil { err = ErrInvalidPkgPath(fmt.Sprintf( "package not found: %s", pkgPath)) @@ -528,18 +571,24 @@ func (vm *VMKeeper) QueryEvalString(ctx sdk.Context, pkgPath string, expr string gno.MachineOptions{ PkgPath: pkgPath, Output: os.Stdout, // XXX - Store: store, + Store: gnostore, Context: msgCtx, Alloc: alloc, MaxCycles: vm.maxCycles, + GasMeter: ctx.GasMeter(), }) + defer m.Release() defer func() { if r := recover(); r != nil { - err = errors.Wrap(fmt.Errorf("%v", r), "VM query eval string panic: %v\n%s\n", - r, m.String()) - return + switch r.(type) { + case store.OutOfGasException: // panic in consumeGas() + panic(r) + default: + err = errors.Wrap(fmt.Errorf("%v", r), "VM query eval string panic: %v\n%s\n", + r, m.String()) + return + } } - m.Release() }() rtvs := m.Eval(xx) if len(rtvs) != 1 { diff --git a/gnovm/pkg/gnolang/alloc.go b/gnovm/pkg/gnolang/alloc.go index a83f8102a2b..495be0d2dc2 100644 --- a/gnovm/pkg/gnolang/alloc.go +++ b/gnovm/pkg/gnolang/alloc.go @@ -100,6 +100,7 @@ func (alloc *Allocator) Allocate(size int64) { // this can happen for map items just prior to assignment. return } + alloc.bytes += size if alloc.bytes > alloc.maxBytes { panic("allocation limit exceeded") diff --git a/gnovm/pkg/gnolang/machine.go b/gnovm/pkg/gnolang/machine.go index ea7be1d1f22..018fac66e64 100644 --- a/gnovm/pkg/gnolang/machine.go +++ b/gnovm/pkg/gnolang/machine.go @@ -14,6 +14,8 @@ import ( "github.com/gnolang/gno/tm2/pkg/errors" "github.com/gnolang/gno/tm2/pkg/std" + "github.com/gnolang/gno/tm2/pkg/store" + "github.com/gnolang/overflow" ) // Exception represents a panic that originates from a gno program. @@ -53,11 +55,10 @@ type Machine struct { CheckTypes bool // not yet used ReadOnly bool MaxCycles int64 - - Output io.Writer - Store Store - Context interface{} - + Output io.Writer + Store Store + Context interface{} + GasMeter store.GasMeter // PanicScope is incremented each time a panic occurs and is reset to // zero when it is recovered. PanicScope uint @@ -96,6 +97,7 @@ type MachineOptions struct { Alloc *Allocator // or see MaxAllocBytes. MaxAllocBytes int64 // or 0 for no limit. MaxCycles int64 // or 0 for no limit. + GasMeter store.GasMeter } // the machine constructor gets spammed @@ -120,6 +122,8 @@ func NewMachineWithOptions(opts MachineOptions) *Machine { checkTypes := opts.CheckTypes readOnly := opts.ReadOnly maxCycles := opts.MaxCycles + vmGasMeter := opts.GasMeter + output := opts.Output if output == nil { output = os.Stdout @@ -154,6 +158,7 @@ func NewMachineWithOptions(opts MachineOptions) *Machine { mm.Output = output mm.Store = store mm.Context = context + mm.GasMeter = vmGasMeter if pv != nil { mm.SetActivePackage(pv) @@ -967,10 +972,17 @@ const ( OpReturnCallDefers Op = 0xD7 // TODO rename? ) +const GasFactorCPU int64 = 1 + //---------------------------------------- // "CPU" steps. 
func (m *Machine) incrCPU(cycles int64) { + if m.GasMeter != nil { + gasCPU := overflow.Mul64p(cycles, GasFactorCPU) + m.GasMeter.ConsumeGas(gasCPU, "CPUCycles") + } + m.Cycles += cycles if m.MaxCycles != 0 && m.Cycles > m.MaxCycles { panic("CPU cycle overrun") diff --git a/tm2/pkg/sdk/auth/ante.go b/tm2/pkg/sdk/auth/ante.go index 6abc4380e89..a0ab1cb1dd0 100644 --- a/tm2/pkg/sdk/auth/ante.go +++ b/tm2/pkg/sdk/auth/ante.go @@ -393,6 +393,10 @@ func EnsureSufficientMempoolFees(ctx sdk.Context, fee std.Fee) sdk.Result { if fgd == gpd { prod1 := big.NewInt(0).Mul(fga, gpg) // fee amount * price gas prod2 := big.NewInt(0).Mul(fgw, gpa) // fee gas * price amount + // This is equivalent to checking + // That the Fee / GasWanted ratio is greater than or equal to the minimum GasPrice per gas. + // This approach helps us avoid dealing with configurations where the value of + // the minimum gas price is set to 0.00001ugnot/gas. if prod1.Cmp(prod2) >= 0 { return sdk.Result{} } else { diff --git a/tm2/pkg/store/gas/store.go b/tm2/pkg/store/gas/store.go index bcd0cb7ee80..4ffe46dc275 100644 --- a/tm2/pkg/store/gas/store.go +++ b/tm2/pkg/store/gas/store.go @@ -2,6 +2,7 @@ package gas import ( "github.com/gnolang/gno/tm2/pkg/store/types" + "github.com/gnolang/overflow" ) var _ types.Store = &Store{} @@ -29,8 +30,8 @@ func (gs *Store) Get(key []byte) (value []byte) { gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) value = gs.parent.Get(key) - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) + gas := overflow.Mul64p(gs.gasConfig.ReadCostPerByte, types.Gas(len(value))) + gs.gasMeter.ConsumeGas(gas, types.GasReadPerByteDesc) return value } @@ -39,8 +40,9 @@ func (gs *Store) Get(key []byte) (value []byte) { func (gs *Store) Set(key []byte, value []byte) { types.AssertValidValue(value) gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) + + gas := overflow.Mul64p(gs.gasConfig.WriteCostPerByte, types.Gas(len(value))) + gs.gasMeter.ConsumeGas(gas, types.GasWritePerByteDesc) gs.parent.Set(key, value) } @@ -156,7 +158,7 @@ func (gi *gasIterator) Close() { // based on the current value's length. func (gi *gasIterator) consumeSeekGas() { value := gi.Value() - - gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc) + gas := overflow.Mul64p(gi.gasConfig.ReadCostPerByte, types.Gas(len(value))) gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc) + gi.gasMeter.ConsumeGas(gas, types.GasValuePerByteDesc) } From fdde3d038a7260bb5d0a31aacd001bf8d16abcd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Milo=C5=A1=20=C5=BDivkovi=C4=87?= Date: Fri, 26 Apr 2024 14:29:01 +0200 Subject: [PATCH 5/6] feat: clean up `gnokey add` + add coverage (#1212) ## Description This PR initially started out as adding support for account derivation to `gnokey add`. However, over the discussions, it turned into a bigger cleanup of the `gnokey add` subcommand suite. 
Changes done:

- extracted ledger support into `gnokey add ledger`
- extracted multisig support into `gnokey add multisig`
- extracted bech32 public key support into `gnokey add bech32`
- added coverage (unit tests) for all of this functionality, which was previously missing
- added the `--derivation-path` flag to the base `gnokey add` (the original goal of the PR); a short derivation sketch follows this list
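For reference, a small stand-alone sketch of what the new `--derivation-path` flag computes, following the `generateKeyFromSeed` helper visible in the `add.go` diff below. The mnemonic is the standard BIP-39 test vector and the two paths are just examples.

```go
package main

import (
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/crypto/bip39"
	"github.com/gnolang/gno/tm2/pkg/crypto/hd"
	"github.com/gnolang/gno/tm2/pkg/crypto/secp256k1"
)

func main() {
	// Standard BIP-39 test mnemonic; for illustration only.
	mnemonic := "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"

	// Derive the BIP-39 seed, then walk each 44'/118'/<account>'/0/<index> path,
	// as gnokey add does for every --derivation-path value.
	seed := bip39.NewSeed(mnemonic, "")
	masterPriv, ch := hd.ComputeMastersFromSeed(seed)

	for _, path := range []string{"44'/118'/0'/0/0", "44'/118'/0'/0/1"} {
		derivedPriv, err := hd.DerivePrivateKeyForPath(masterPriv, ch, path)
		if err != nil {
			panic(err)
		}

		key := secp256k1.PrivKeySecp256k1(derivedPriv)
		fmt.Printf("%s -> %s\n", path, key.PubKey().Address().String())
	}
}
```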
Contributors' checklist:

- [x] Added new tests, or not needed, or not feasible
- [x] Provided an example (e.g. screenshot) to aid review or the PR is self-explanatory
- [x] Updated the official documentation or not needed
- [x] No breaking changes were made, or a `BREAKING CHANGE: xxx` message was included in the description
- [ ] Added references to related issues and PRs
- [ ] Provided any useful hints for running manual tests
- [ ] Added new benchmarks to [generated graphs](https://gnoland.github.io/benchmarks), if any. More info [here](https://github.com/gnolang/gno/blob/master/.benchmarks/README.md).
--- .github/workflows/tm2.yml | 2 +- tm2/Makefile | 2 +- tm2/pkg/crypto/keys/client/add.go | 333 +++++++-------- tm2/pkg/crypto/keys/client/add_bech32.go | 94 +++++ tm2/pkg/crypto/keys/client/add_bech32_test.go | 202 +++++++++ tm2/pkg/crypto/keys/client/add_ledger.go | 76 ++++ .../keys/client/add_ledger_skipped_test.go | 10 + tm2/pkg/crypto/keys/client/add_ledger_test.go | 170 ++++++++ tm2/pkg/crypto/keys/client/add_multisig.go | 138 ++++++ .../crypto/keys/client/add_multisig_test.go | 121 ++++++ tm2/pkg/crypto/keys/client/add_test.go | 394 ++++++++++++++---- .../keys/keybase_ledger_skipped_test.go | 18 + tm2/pkg/crypto/keys/keybase_ledger_test.go | 43 ++ tm2/pkg/crypto/keys/keybase_test.go | 47 --- tm2/pkg/crypto/ledger/discover.go | 19 + tm2/pkg/crypto/ledger/discover_mock.go | 69 +++ tm2/pkg/crypto/ledger/ledger_secp256k1.go | 12 - 17 files changed, 1439 insertions(+), 311 deletions(-) create mode 100644 tm2/pkg/crypto/keys/client/add_bech32.go create mode 100644 tm2/pkg/crypto/keys/client/add_bech32_test.go create mode 100644 tm2/pkg/crypto/keys/client/add_ledger.go create mode 100644 tm2/pkg/crypto/keys/client/add_ledger_skipped_test.go create mode 100644 tm2/pkg/crypto/keys/client/add_ledger_test.go create mode 100644 tm2/pkg/crypto/keys/client/add_multisig.go create mode 100644 tm2/pkg/crypto/keys/client/add_multisig_test.go create mode 100644 tm2/pkg/crypto/keys/keybase_ledger_skipped_test.go create mode 100644 tm2/pkg/crypto/keys/keybase_ledger_test.go create mode 100644 tm2/pkg/crypto/ledger/discover.go create mode 100644 tm2/pkg/crypto/ledger/discover_mock.go diff --git a/.github/workflows/tm2.yml b/.github/workflows/tm2.yml index ced6054f9e9..5c7c24e98e1 100644 --- a/.github/workflows/tm2.yml +++ b/.github/workflows/tm2.yml @@ -79,7 +79,7 @@ jobs: working-directory: tm2 run: | export GOPATH=$HOME/go - export GOTEST_FLAGS="-v -p 1 -timeout=20m -coverprofile=coverage.out -covermode=atomic" + export GOTEST_FLAGS="-v -p 1 -timeout=20m -coverprofile=coverage.out -covermode=atomic -tags='ledger_suite'" make ${{ matrix.args }} touch coverage.out - uses: actions/upload-artifact@v4 diff --git a/tm2/Makefile b/tm2/Makefile index 3103ef220b2..f841b989b77 100644 --- a/tm2/Makefile +++ b/tm2/Makefile @@ -20,7 +20,7 @@ GOFMT_FLAGS ?= -w # flags for `make imports`. GOIMPORTS_FLAGS ?= $(GOFMT_FLAGS) # test suite flags. 
-GOTEST_FLAGS ?= -v -p 1 -timeout=30m +GOTEST_FLAGS ?= -v -p 1 -timeout=30m -tags='ledger_suite' ######################################## # Dev tools diff --git a/tm2/pkg/crypto/keys/client/add.go b/tm2/pkg/crypto/keys/client/add.go index 561d2aa5611..3c0c6aaf343 100644 --- a/tm2/pkg/crypto/keys/client/add.go +++ b/tm2/pkg/crypto/keys/client/add.go @@ -5,28 +5,32 @@ import ( "errors" "flag" "fmt" - "sort" + "regexp" "github.com/gnolang/gno/tm2/pkg/commands" "github.com/gnolang/gno/tm2/pkg/crypto" "github.com/gnolang/gno/tm2/pkg/crypto/bip39" + "github.com/gnolang/gno/tm2/pkg/crypto/hd" "github.com/gnolang/gno/tm2/pkg/crypto/keys" - "github.com/gnolang/gno/tm2/pkg/crypto/multisig" + "github.com/gnolang/gno/tm2/pkg/crypto/secp256k1" ) +var ( + errInvalidMnemonic = errors.New("invalid bip39 mnemonic") + errInvalidDerivationPath = errors.New("invalid derivation path") +) + +var reDerivationPath = regexp.MustCompile(`^44'\/118'\/\d+'\/0\/\d+$`) + type AddCfg struct { RootCfg *BaseCfg - Multisig commands.StringArr - MultisigThreshold int - NoSort bool - PublicKey string - UseLedger bool - Recover bool - NoBackup bool - DryRun bool - Account uint64 - Index uint64 + Recover bool + NoBackup bool + Account uint64 + Index uint64 + + DerivationPath commands.StringArr } func NewAddCmd(rootCfg *BaseCfg, io commands.IO) *commands.Command { @@ -34,7 +38,7 @@ func NewAddCmd(rootCfg *BaseCfg, io commands.IO) *commands.Command { RootCfg: rootCfg, } - return commands.NewCommand( + cmd := commands.NewCommand( commands.Metadata{ Name: "add", ShortUsage: "add [flags] ", @@ -45,43 +49,17 @@ func NewAddCmd(rootCfg *BaseCfg, io commands.IO) *commands.Command { return execAdd(cfg, args, io) }, ) -} - -func (c *AddCfg) RegisterFlags(fs *flag.FlagSet) { - fs.Var( - &c.Multisig, - "multisig", - "construct and store a multisig public key (implies --pubkey)", - ) - - fs.IntVar( - &c.MultisigThreshold, - "threshold", - 1, - "K out of N required signatures. 
For use in conjunction with --multisig", - ) - fs.BoolVar( - &c.NoSort, - "nosort", - false, - "keys passed to --multisig are taken in the order they're supplied", - ) - - fs.StringVar( - &c.PublicKey, - "pubkey", - "", - "parse a public key in bech32 format and save it to disk", + cmd.AddSubCommands( + NewAddMultisigCmd(cfg, io), + NewAddLedgerCmd(cfg, io), + NewAddBech32Cmd(cfg, io), ) - fs.BoolVar( - &c.UseLedger, - "ledger", - false, - "store a local reference to a private key on a Ledger device", - ) + return cmd +} +func (c *AddCfg) RegisterFlags(fs *flag.FlagSet) { fs.BoolVar( &c.Recover, "recover", @@ -96,13 +74,6 @@ func (c *AddCfg) RegisterFlags(fs *flag.FlagSet) { "don't print out seed phrase (if others are watching the terminal)", ) - fs.BoolVar( - &c.DryRun, - "dryrun", - false, - "perform action, but don't add key to local keystore", - ) - fs.Uint64Var( &c.Account, "account", @@ -116,170 +87,124 @@ func (c *AddCfg) RegisterFlags(fs *flag.FlagSet) { 0, "address index number for HD derivation", ) -} - -// DryRunKeyPass contains the default key password for genesis transactions -const DryRunKeyPass = "12345678" - -/* -input - - bip39 mnemonic - - bip39 passphrase - - bip44 path - - local encryption password -output - - armor encrypted private key (saved to file) -*/ -func execAdd(cfg *AddCfg, args []string, io commands.IO) error { - var ( - kb keys.Keybase - err error - encryptPassword string + fs.Var( + &c.DerivationPath, + "derivation-path", + "derivation path for deriving the address", ) +} +func execAdd(cfg *AddCfg, args []string, io commands.IO) error { + // Check if the key name is provided if len(args) != 1 { return flag.ErrHelp } - name := args[0] - showMnemonic := !cfg.NoBackup - - if cfg.DryRun { - // we throw this away, so don't enforce args, - // we want to get a new random seed phrase quickly - kb = keys.NewInMemory() - encryptPassword = DryRunKeyPass - } else { - kb, err = keys.NewKeyBaseFromDir(cfg.RootCfg.Home) - if err != nil { - return err + // Validate the derivation paths are correct + for _, path := range cfg.DerivationPath { + // Make sure the path is valid + if _, err := hd.NewParamsFromPath(path); err != nil { + return fmt.Errorf( + "%w, %w", + errInvalidDerivationPath, + err, + ) } - if has, err := kb.HasByName(name); err == nil && has { - // account exists, ask for user confirmation - response, err2 := io.GetConfirmation(fmt.Sprintf("Override the existing name %s", name)) - if err2 != nil { - return err2 - } - if !response { - return errors.New("aborted") - } + // Make sure the path conforms to the Gno derivation path + if !reDerivationPath.MatchString(path) { + return errInvalidDerivationPath } + } - multisigKeys := cfg.Multisig - if len(multisigKeys) != 0 { - var pks []crypto.PubKey - - multisigThreshold := cfg.MultisigThreshold - if err := keys.ValidateMultisigThreshold(multisigThreshold, len(multisigKeys)); err != nil { - return err - } - - for _, keyname := range multisigKeys { - k, err := kb.GetByName(keyname) - if err != nil { - return err - } - pks = append(pks, k.GetPubKey()) - } - - // Handle --nosort - if !cfg.NoSort { - sort.Slice(pks, func(i, j int) bool { - return pks[i].Address().Compare(pks[j].Address()) < 0 - }) - } - - pk := multisig.NewPubKeyMultisigThreshold(multisigThreshold, pks) - if _, err := kb.CreateMulti(name, pk); err != nil { - return err - } - - io.Printfln("Key %q saved to disk.\n", name) - return nil - } + name := args[0] - // ask for a password when generating a local key - if cfg.PublicKey == "" && !cfg.UseLedger { - 
encryptPassword, err = io.GetCheckPassword( - [2]string{ - "Enter a passphrase to encrypt your key to disk:", - "Repeat the passphrase:", - }, - cfg.RootCfg.InsecurePasswordStdin, - ) - if err != nil { - return err - } - } + // Read the keybase from the home directory + kb, err := keys.NewKeyBaseFromDir(cfg.RootCfg.Home) + if err != nil { + return fmt.Errorf("unable to read keybase, %w", err) } - if cfg.PublicKey != "" { - pk, err := crypto.PubKeyFromBech32(cfg.PublicKey) - if err != nil { - return err - } - _, err = kb.CreateOffline(name, pk) - if err != nil { - return err - } - return nil + // Check if the key exists + exists, err := kb.HasByName(name) + if err != nil { + return fmt.Errorf("unable to fetch key, %w", err) } - account := cfg.Account - index := cfg.Index - - // If we're using ledger, only thing we need is the path and the bech32 prefix. - if cfg.UseLedger { - bech32PrefixAddr := crypto.Bech32AddrPrefix - info, err := kb.CreateLedger(name, keys.Secp256k1, bech32PrefixAddr, uint32(account), uint32(index)) + // Get overwrite confirmation, if any + if exists { + overwrite, err := io.GetConfirmation(fmt.Sprintf("Override the existing name %s", name)) if err != nil { - return err + return fmt.Errorf("unable to get confirmation, %w", err) } - return printCreate(info, false, "", io) + if !overwrite { + return errOverwriteAborted + } + } + + // Ask for a password when generating a local key + encryptPassword, err := io.GetCheckPassword( + [2]string{ + "Enter a passphrase to encrypt your key to disk:", + "Repeat the passphrase:", + }, + cfg.RootCfg.InsecurePasswordStdin, + ) + if err != nil { + return fmt.Errorf("unable to parse provided password, %w", err) } // Get bip39 mnemonic - var mnemonic string - const bip39Passphrase string = "" // XXX research. + mnemonic, err := GenerateMnemonic(mnemonicEntropySize) + if err != nil { + return fmt.Errorf("unable to generate mnemonic, %w", err) + } if cfg.Recover { bip39Message := "Enter your bip39 mnemonic" mnemonic, err = io.GetString(bip39Message) if err != nil { - return err + return fmt.Errorf("unable to parse mnemonic, %w", err) } + // Make sure it's valid if !bip39.IsMnemonicValid(mnemonic) { - return errors.New("invalid mnemonic") - } - } - - if len(mnemonic) == 0 { - mnemonic, err = GenerateMnemonic(mnemonicEntropySize) - if err != nil { - return err + return errInvalidMnemonic } } - info, err := kb.CreateAccount(name, mnemonic, bip39Passphrase, encryptPassword, uint32(account), uint32(index)) + // Save the account + info, err := kb.CreateAccount( + name, + mnemonic, + "", + encryptPassword, + uint32(cfg.Account), + uint32(cfg.Index), + ) if err != nil { - return err + return fmt.Errorf("unable to save account to keybase, %w", err) } + // Print the derived address info + printDerive(mnemonic, cfg.DerivationPath, io) + // Recover key from seed passphrase if cfg.Recover { - // Hide mnemonic from output - showMnemonic = false - mnemonic = "" + printCreate(info, false, "", io) + + return nil } - return printCreate(info, showMnemonic, mnemonic, io) + // Print the key create info + printCreate(info, !cfg.NoBackup, mnemonic, io) + + return nil } -func printCreate(info keys.Info, showMnemonic bool, mnemonic string, io commands.IO) error { +func printCreate(info keys.Info, showMnemonic bool, mnemonic string, io commands.IO) { io.Println("") printNewInfo(info, io) @@ -291,8 +216,6 @@ It is the only way to recover your account if you ever forget your password. 
%v `, mnemonic) } - - return nil } func printNewInfo(info keys.Info, io commands.IO) { @@ -305,3 +228,59 @@ func printNewInfo(info keys.Info, io commands.IO) { io.Printfln("* %s (%s) - addr: %v pub: %v, path: %v", keyname, keytype, keyaddr, keypub, keypath) } + +// printDerive prints the derived accounts, if any +func printDerive( + mnemonic string, + paths []string, + io commands.IO, +) { + if len(paths) == 0 { + // No accounts to print + return + } + + // Generate the accounts + accounts := generateAccounts( + mnemonic, + paths, + ) + + io.Printf("[Derived Accounts]\n\n") + + // Print them out + for index, path := range paths { + io.Printfln( + "%d. %s: %s", + index, + path, + accounts[index].String(), + ) + } +} + +// generateAccounts the accounts using the provided mnemonics +func generateAccounts(mnemonic string, paths []string) []crypto.Address { + addresses := make([]crypto.Address, len(paths)) + + // Generate the seed + seed := bip39.NewSeed(mnemonic, "") + + for index, path := range paths { + key := generateKeyFromSeed(seed, path) + address := key.PubKey().Address() + + addresses[index] = address + } + + return addresses +} + +// generateKeyFromSeed generates a private key from +// the provided seed and path +func generateKeyFromSeed(seed []byte, path string) crypto.PrivKey { + masterPriv, ch := hd.ComputeMastersFromSeed(seed) + derivedPriv, _ := hd.DerivePrivateKeyForPath(masterPriv, ch, path) + + return secp256k1.PrivKeySecp256k1(derivedPriv) +} diff --git a/tm2/pkg/crypto/keys/client/add_bech32.go b/tm2/pkg/crypto/keys/client/add_bech32.go new file mode 100644 index 00000000000..7b7cb8aca2c --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_bech32.go @@ -0,0 +1,94 @@ +package client + +import ( + "context" + "flag" + "fmt" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" +) + +type AddBech32Cfg struct { + RootCfg *AddCfg + + PublicKey string +} + +// NewAddBech32Cmd creates a gnokey add bech32 command +func NewAddBech32Cmd(rootCfg *AddCfg, io commands.IO) *commands.Command { + cfg := &AddBech32Cfg{ + RootCfg: rootCfg, + } + + return commands.NewCommand( + commands.Metadata{ + Name: "bech32", + ShortUsage: "add bech32 [flags] ", + ShortHelp: "adds a public key to the keybase, using the bech32 representation", + }, + cfg, + func(_ context.Context, args []string) error { + return execAddBech32(cfg, args, io) + }, + ) +} + +func (c *AddBech32Cfg) RegisterFlags(fs *flag.FlagSet) { + fs.StringVar( + &c.PublicKey, + "pubkey", + "", + "parse a public key in bech32 format and save it to disk", + ) +} + +func execAddBech32(cfg *AddBech32Cfg, args []string, io commands.IO) error { + // Validate a key name was provided + if len(args) != 1 { + return flag.ErrHelp + } + + name := args[0] + + // Read the keybase from the home directory + kb, err := keys.NewKeyBaseFromDir(cfg.RootCfg.RootCfg.Home) + if err != nil { + return fmt.Errorf("unable to read keybase, %w", err) + } + + // Check if the key exists + exists, err := kb.HasByName(name) + if err != nil { + return fmt.Errorf("unable to fetch key, %w", err) + } + + // Get overwrite confirmation, if any + if exists { + overwrite, err := io.GetConfirmation(fmt.Sprintf("Override the existing name %s", name)) + if err != nil { + return fmt.Errorf("unable to get confirmation, %w", err) + } + + if !overwrite { + return errOverwriteAborted + } + } + + // Parse the public key + publicKey, err := crypto.PubKeyFromBech32(cfg.PublicKey) + if err != nil { + return 
fmt.Errorf("unable to parse public key from bech32, %w", err) + } + + // Save it offline in the keybase + _, err = kb.CreateOffline(name, publicKey) + if err != nil { + return fmt.Errorf("unable to save public key, %w", err) + } + + io.Printfln("Key %q saved to disk.\n", name) + + return nil +} diff --git a/tm2/pkg/crypto/keys/client/add_bech32_test.go b/tm2/pkg/crypto/keys/client/add_bech32_test.go new file mode 100644 index 00000000000..f7697c0184d --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_bech32_test.go @@ -0,0 +1,202 @@ +package client + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto/bip39" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAdd_Bech32(t *testing.T) { + t.Parallel() + + t.Run("valid bech32 addition", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + seed = bip39.NewSeed(generateTestMnemonic(t), "") + account = generateKeyFromSeed(seed, "44'/118'/0'/0/0") + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "bech32", + "--insecure-password-stdin", + "--home", + kbHome, + "--pubkey", + account.PubKey().String(), + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, original) + + assert.Equal(t, account.PubKey().Address().String(), original.GetAddress().String()) + }) + + t.Run("valid bech32 addition, overwrite", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + seed = bip39.NewSeed(generateTestMnemonic(t), "") + originalAccount = generateKeyFromSeed(seed, "44'/118'/0'/0/0") + copyAccount = generateKeyFromSeed(seed, "44'/118'/0'/0/1") + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + baseArgs := []string{ + "add", + "bech32", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + initialArgs := append(baseArgs, []string{ + "--pubkey", + originalAccount.PubKey().String(), + }...) + + require.NoError(t, cmd.ParseAndRun(ctx, initialArgs)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + + require.Equal(t, originalAccount.PubKey().Address().String(), original.GetAddress().String()) + + // Overwrite the key + io.SetIn(strings.NewReader("y\ntest1234\ntest1234\n")) + + secondaryArgs := append(baseArgs, []string{ + "--pubkey", + copyAccount.PubKey().String(), + }...) 
+ + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.NoError(t, cmd.ParseAndRun(ctx, secondaryArgs)) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + require.Equal(t, copyAccount.PubKey().Address().String(), newKey.GetAddress().String()) + }) + + t.Run("no overwrite permission", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + seed = bip39.NewSeed(generateTestMnemonic(t), "") + originalAccount = generateKeyFromSeed(seed, "44'/118'/0'/0/0") + copyAccount = generateKeyFromSeed(seed, "44'/118'/0'/0/1") + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + baseArgs := []string{ + "add", + "bech32", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + initialArgs := append(baseArgs, []string{ + "--pubkey", + originalAccount.PubKey().String(), + }...) + + require.NoError(t, cmd.ParseAndRun(ctx, initialArgs)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + + io.SetIn(strings.NewReader("n\ntest1234\ntest1234\n")) + + // Confirm overwrite + secondaryArgs := append(baseArgs, []string{ + "--pubkey", + copyAccount.PubKey().String(), + }...) + + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.ErrorIs(t, cmd.ParseAndRun(ctx, secondaryArgs), errOverwriteAborted) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + // Make sure the key is not overwritten + assert.Equal(t, original.GetAddress(), newKey.GetAddress()) + }) +} diff --git a/tm2/pkg/crypto/keys/client/add_ledger.go b/tm2/pkg/crypto/keys/client/add_ledger.go new file mode 100644 index 00000000000..97bd4a3bee5 --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_ledger.go @@ -0,0 +1,76 @@ +package client + +import ( + "context" + "flag" + "fmt" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" +) + +// NewAddLedgerCmd creates a gnokey add ledger command +func NewAddLedgerCmd(cfg *AddCfg, io commands.IO) *commands.Command { + return commands.NewCommand( + commands.Metadata{ + Name: "ledger", + ShortUsage: "add ledger [flags] ", + ShortHelp: "adds a Ledger key reference to the keybase", + }, + commands.NewEmptyConfig(), + func(_ context.Context, args []string) error { + return execAddLedger(cfg, args, io) + }, + ) +} + +func execAddLedger(cfg *AddCfg, args []string, io commands.IO) error { + // Validate a key name was provided + if len(args) != 1 { + return flag.ErrHelp + } + + name := args[0] + + // Read the keybase from the home directory + kb, err := keys.NewKeyBaseFromDir(cfg.RootCfg.Home) + if err != nil { + return fmt.Errorf("unable to read keybase, %w", err) + } + + // Check if the key exists + exists, err := kb.HasByName(name) + if err != nil { + return fmt.Errorf("unable to fetch key, %w", err) + } + + // Get overwrite confirmation, if any + if exists { + overwrite, err := io.GetConfirmation(fmt.Sprintf("Override the existing name %s", name)) + if err != nil { + return fmt.Errorf("unable to get confirmation, %w", err) + } + + if !overwrite { + return errOverwriteAborted + } + } + + // Create the ledger reference + info, err := 
kb.CreateLedger( + name, + keys.Secp256k1, + crypto.Bech32AddrPrefix, + uint32(cfg.Account), + uint32(cfg.Index), + ) + if err != nil { + return fmt.Errorf("unable to create Ledger reference in keybase, %w", err) + } + + // Print the information + printCreate(info, false, "", io) + + return nil +} diff --git a/tm2/pkg/crypto/keys/client/add_ledger_skipped_test.go b/tm2/pkg/crypto/keys/client/add_ledger_skipped_test.go new file mode 100644 index 00000000000..8a09d060b16 --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_ledger_skipped_test.go @@ -0,0 +1,10 @@ +//go:build !ledger_suite +// +build !ledger_suite + +package client + +import "testing" + +func TestAdd_Ledger(t *testing.T) { + t.Skip("Please enable the 'ledger_suite' build tags") +} diff --git a/tm2/pkg/crypto/keys/client/add_ledger_test.go b/tm2/pkg/crypto/keys/client/add_ledger_test.go new file mode 100644 index 00000000000..c1384efcb79 --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_ledger_test.go @@ -0,0 +1,170 @@ +//go:build ledger_suite +// +build ledger_suite + +package client + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Make sure to run these tests with the following tag enabled: +// -tags='ledger_suite' +func TestAdd_Ledger(t *testing.T) { + t.Parallel() + + t.Run("valid ledger reference added", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "ledger", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, original) + }) + + t.Run("valid ledger reference added, overwrite", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "ledger", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, original) + + io.SetIn(strings.NewReader("y\ntest1234\ntest1234\n")) + + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + // Make sure the different key is generated and overwritten + assert.NotEqual(t, original.GetAddress(), newKey.GetAddress()) + }) + + t.Run("valid ledger reference added, no overwrite permission", 
func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "ledger", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, original) + + io.SetIn(strings.NewReader("n\ntest1234\ntest1234\n")) + + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.ErrorIs(t, cmd.ParseAndRun(ctx, args), errOverwriteAborted) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + // Make sure the key is not overwritten + assert.Equal(t, original.GetAddress(), newKey.GetAddress()) + }) +} diff --git a/tm2/pkg/crypto/keys/client/add_multisig.go b/tm2/pkg/crypto/keys/client/add_multisig.go new file mode 100644 index 00000000000..39b90571143 --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_multisig.go @@ -0,0 +1,138 @@ +package client + +import ( + "context" + "errors" + "flag" + "fmt" + "sort" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/gnolang/gno/tm2/pkg/crypto/multisig" +) + +var ( + errOverwriteAborted = errors.New("overwrite aborted") + errUnableToVerifyMultisig = errors.New("unable to verify multisig threshold") +) + +type AddMultisigCfg struct { + RootCfg *AddCfg + + NoSort bool + Multisig commands.StringArr + MultisigThreshold int +} + +// NewAddMultisigCmd creates a gnokey add multisig command +func NewAddMultisigCmd(rootCfg *AddCfg, io commands.IO) *commands.Command { + cfg := &AddMultisigCfg{ + RootCfg: rootCfg, + } + + return commands.NewCommand( + commands.Metadata{ + Name: "multisig", + ShortUsage: "add multisig [flags] ", + ShortHelp: "adds a multisig key reference to the keybase", + }, + cfg, + func(_ context.Context, args []string) error { + return execAddMultisig(cfg, args, io) + }, + ) +} + +func (c *AddMultisigCfg) RegisterFlags(fs *flag.FlagSet) { + fs.BoolVar( + &c.NoSort, + "nosort", + false, + "keys passed to --multisig are taken in the order they're supplied", + ) + + fs.Var( + &c.Multisig, + "multisig", + "construct and store a multisig public key", + ) + + fs.IntVar( + &c.MultisigThreshold, + "threshold", + 1, + "K out of N required signatures", + ) +} + +func execAddMultisig(cfg *AddMultisigCfg, args []string, io commands.IO) error { + // Validate a key name was provided + if len(args) != 1 { + return flag.ErrHelp + } + + // Validate the multisig threshold + if err := keys.ValidateMultisigThreshold( + cfg.MultisigThreshold, + len(cfg.Multisig), + ); err != nil { + return errUnableToVerifyMultisig + } + + name := args[0] + + // Read the keybase from the home directory + kb, err := keys.NewKeyBaseFromDir(cfg.RootCfg.RootCfg.Home) + if err != nil { + return fmt.Errorf("unable to read keybase, %w", err) + } + + // Check if the key exists + exists, err := kb.HasByName(name) + if err != nil { + return fmt.Errorf("unable to fetch key, %w", err) + } + + // Get overwrite confirmation, if any + if exists { + overwrite, 
err := io.GetConfirmation(fmt.Sprintf("Override the existing name %s", name)) + if err != nil { + return fmt.Errorf("unable to get confirmation, %w", err) + } + + if !overwrite { + return errOverwriteAborted + } + } + + publicKeys := make([]crypto.PubKey, 0) + for _, keyName := range cfg.Multisig { + k, err := kb.GetByName(keyName) + if err != nil { + return fmt.Errorf("unable to fetch key, %w", err) + } + + publicKeys = append(publicKeys, k.GetPubKey()) + } + + // Check if the keys should be sorted + if !cfg.NoSort { + sort.Slice(publicKeys, func(i, j int) bool { + return publicKeys[i].Address().Compare(publicKeys[j].Address()) < 0 + }) + } + + // Create a new public key with the multisig threshold + if _, err := kb.CreateMulti( + name, + multisig.NewPubKeyMultisigThreshold(cfg.MultisigThreshold, publicKeys), + ); err != nil { + return fmt.Errorf("unable to create multisig key reference, %w", err) + } + + io.Printfln("Key %q saved to disk.\n", name) + + return nil +} diff --git a/tm2/pkg/crypto/keys/client/add_multisig_test.go b/tm2/pkg/crypto/keys/client/add_multisig_test.go new file mode 100644 index 00000000000..4a350d5faa9 --- /dev/null +++ b/tm2/pkg/crypto/keys/client/add_multisig_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAdd_Multisig(t *testing.T) { + t.Parallel() + + t.Run("invalid multisig threshold", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "multisig", + "--insecure-password-stdin", + "--home", + kbHome, + "--multisig", + "example", + "--threshold", + "2", + keyName, + } + + require.ErrorIs(t, cmd.ParseAndRun(ctx, args), errUnableToVerifyMultisig) + }) + + t.Run("valid multisig reference added", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + mnemonic = generateTestMnemonic(t) + + keyNames = []string{ + "key-1", + "key-2", + } + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("y\ntest1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "multisig", + "--insecure-password-stdin", + "--home", + kbHome, + "--multisig", + keyNames[0], + "--multisig", + keyNames[1], + keyNames[0], + } + + // Prepare the multisig keys + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + for index, keyName := range keyNames { + _, err = kb.CreateAccount( + keyName, + mnemonic, + "", + "123", + 0, + uint32(index), + ) + + require.NoError(t, err) + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Verify the key is multisig + original, err := kb.GetByName(keyNames[0]) + require.NoError(t, err) + require.NotNil(t, original) + + assert.Equal(t, original.GetType(), keys.TypeMulti) + }) +} diff --git 
a/tm2/pkg/crypto/keys/client/add_test.go b/tm2/pkg/crypto/keys/client/add_test.go index 4110ea32c9a..37638f995bd 100644 --- a/tm2/pkg/crypto/keys/client/add_test.go +++ b/tm2/pkg/crypto/keys/client/add_test.go @@ -1,116 +1,364 @@ package client import ( + "bytes" + "context" "fmt" "strings" "testing" + "time" "github.com/gnolang/gno/tm2/pkg/commands" "github.com/gnolang/gno/tm2/pkg/crypto/keys" - "github.com/gnolang/gno/tm2/pkg/crypto/secp256k1" - "github.com/gnolang/gno/tm2/pkg/testutils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func Test_execAddBasic(t *testing.T) { +func TestAdd_Base_Add(t *testing.T) { t.Parallel() - // make new test dir - kbHome, kbCleanUp := testutils.NewTestCaseDir(t) - assert.NotNil(t, kbHome) - defer kbCleanUp() + t.Run("valid key addition, generated mnemonic", func(t *testing.T) { + t.Parallel() - cfg := &AddCfg{ - RootCfg: &BaseCfg{ - BaseOptions: BaseOptions{ + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ InsecurePasswordStdin: true, Home: kbHome, - }, - }, - } + } - keyName := "keyname1" + keyName = "key-name" + ) - io := commands.NewTestIO() - io.SetIn(strings.NewReader("test1234\ntest1234\n")) + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() - // Create a new key - if err := execAdd(cfg, []string{keyName}, io); err != nil { - t.Fatalf("unable to execute add cmd, %v", err) - } + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) - io.SetIn(strings.NewReader("y\ntest1234\ntest1234\n")) + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) - // Confirm overwrite - if err := execAdd(cfg, []string{keyName}, io); err != nil { - t.Fatalf("unable to execute add cmd, %v", err) - } -} + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } -var ( - test2Mnemonic = "hair stove window more scrap patient endorse left early pear lawn school loud divide vibrant family still bulk lyrics firm plate media critic dove" - test2PubkeyBech32 = "gpub1pgfj7ard9eg82cjtv4u4xetrwqer2dntxyfzxz3pqg5y7u93gpzug38k2p8s8322zpdm96t0ch87ax88sre4vnclz2jcy8uyhst" -) + require.NoError(t, cmd.ParseAndRun(ctx, args)) -func Test_execAddPublicKey(t *testing.T) { - t.Parallel() + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) - kbHome, kbCleanUp := testutils.NewTestCaseDir(t) - assert.NotNil(t, kbHome) - defer kbCleanUp() - - cfg := &AddCfg{ - RootCfg: &BaseCfg{ - BaseOptions: BaseOptions{ - Home: kbHome, - }, - }, - PublicKey: test2PubkeyBech32, // test2 account - } + original, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, original) + }) + + t.Run("valid key addition, overwrite", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + + 
io.SetIn(strings.NewReader("y\ntest1234\ntest1234\n")) + + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + // Make sure the different key is generated and overwritten + assert.NotEqual(t, original.GetAddress(), newKey.GetAddress()) + }) + + t.Run("valid key addition, provided mnemonic", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + mnemonic = generateTestMnemonic(t) + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234" + "\n" + "test1234" + "\n" + mnemonic + "\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + "--recover", + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + key, err := kb.GetByName(keyName) + require.NoError(t, err) + require.NotNil(t, key) + + // Get the account + accounts := generateAccounts(mnemonic, []string{"44'/118'/0'/0/0"}) + + assert.Equal(t, accounts[0].String(), key.GetAddress().String()) + }) + + t.Run("no overwrite permission", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + + keyName = "key-name" + ) - if err := execAdd(cfg, []string{"test2"}, nil); err != nil { - t.Fatalf("unable to execute add cmd, %v", err) + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + io := commands.NewTestIO() + io.SetIn(strings.NewReader("test1234\ntest1234\n")) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + keyName, + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Check the keybase + kb, err := keys.NewKeyBaseFromDir(kbHome) + require.NoError(t, err) + + original, err := kb.GetByName(keyName) + require.NoError(t, err) + + io.SetIn(strings.NewReader("n\ntest1234\ntest1234\n")) + + // Confirm overwrite + cmd = NewRootCmdWithBaseConfig(io, baseOptions) + require.ErrorIs(t, cmd.ParseAndRun(ctx, args), errOverwriteAborted) + + newKey, err := kb.GetByName(keyName) + require.NoError(t, err) + + // Make sure the key is not overwritten + assert.Equal(t, original.GetAddress(), newKey.GetAddress()) + }) +} + +func generateDerivationPaths(count int) []string { + paths := make([]string, count) + + for i := 0; i < count; i++ { + paths[i] = fmt.Sprintf("44'/118'/0'/0/%d", i) } + + return paths } -func Test_execAddRecover(t *testing.T) { +func TestAdd_Derive(t *testing.T) { t.Parallel() - kbHome, kbCleanUp := testutils.NewTestCaseDir(t) - assert.NotNil(t, kbHome) - defer kbCleanUp() + t.Run("valid address derivation", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + mnemonic = generateTestMnemonic(t) + paths = generateDerivationPaths(10) - cfg := &AddCfg{ - RootCfg: &BaseCfg{ - BaseOptions: BaseOptions{ + baseOptions = BaseOptions{ InsecurePasswordStdin: true, Home: kbHome, - }, - }, - Recover: true, // init test2 account - } + } - test2Name := "test2" - test2Passphrase := "gn0rocks!" 
+ dummyPass = "dummy-pass" + ) - io := commands.NewTestIO() - io.SetIn(strings.NewReader(test2Passphrase + "\n" + test2Passphrase + "\n" + test2Mnemonic + "\n")) + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() - if err := execAdd(cfg, []string{test2Name}, io); err != nil { - t.Fatalf("unable to execute add cmd, %v", err) - } + mockOut := bytes.NewBufferString("") + + io := commands.NewTestIO() + io.SetIn(strings.NewReader(dummyPass + "\n" + dummyPass + "\n" + mnemonic + "\n")) + io.SetOut(commands.WriteNopCloser(mockOut)) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + "--recover", + "example-key", + } + + for _, path := range paths { + args = append( + args, []string{ + "--derivation-path", + path, + }..., + ) + } + + require.NoError(t, cmd.ParseAndRun(ctx, args)) + + // Verify the addresses are derived correctly + expectedAccounts := generateAccounts( + mnemonic, + paths, + ) + + // Grab the output + deriveOutput := mockOut.String() + + for _, expectedAccount := range expectedAccounts { + assert.Contains(t, deriveOutput, expectedAccount.String()) + } + }) + + t.Run("malformed derivation path", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + mnemonic = generateTestMnemonic(t) + dummyPass = "dummy-pass" + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + mockOut := bytes.NewBufferString("") + + io := commands.NewTestIO() + io.SetIn(strings.NewReader(dummyPass + "\n" + dummyPass + "\n" + mnemonic + "\n")) + io.SetOut(commands.WriteNopCloser(mockOut)) + + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) + + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + "--recover", + "example-key", + "--derivation-path", + "malformed path", + } + + require.ErrorIs(t, cmd.ParseAndRun(ctx, args), errInvalidDerivationPath) + }) + + t.Run("invalid derivation path", func(t *testing.T) { + t.Parallel() + + var ( + kbHome = t.TempDir() + mnemonic = generateTestMnemonic(t) + dummyPass = "dummy-pass" + baseOptions = BaseOptions{ + InsecurePasswordStdin: true, + Home: kbHome, + } + ) + + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() - kb, err2 := keys.NewKeyBaseFromDir(kbHome) - assert.NoError(t, err2) + mockOut := bytes.NewBufferString("") - infos, err3 := kb.List() - assert.NoError(t, err3) + io := commands.NewTestIO() + io.SetIn(strings.NewReader(dummyPass + "\n" + dummyPass + "\n" + mnemonic + "\n")) + io.SetOut(commands.WriteNopCloser(mockOut)) - info := infos[0] + // Create the command + cmd := NewRootCmdWithBaseConfig(io, baseOptions) - keypub := info.GetPubKey() - keypub = keypub.(secp256k1.PubKeySecp256k1) + args := []string{ + "add", + "--insecure-password-stdin", + "--home", + kbHome, + "--recover", + "example-key", + "--derivation-path", + "44'/500'/0'/0/0", // invalid coin type + } - s := fmt.Sprintf("%s", keypub) - assert.Equal(t, s, test2PubkeyBech32) + require.ErrorIs(t, cmd.ParseAndRun(ctx, args), errInvalidDerivationPath) + }) } diff --git a/tm2/pkg/crypto/keys/keybase_ledger_skipped_test.go b/tm2/pkg/crypto/keys/keybase_ledger_skipped_test.go new file mode 100644 index 00000000000..d406f10f2ed --- /dev/null +++ b/tm2/pkg/crypto/keys/keybase_ledger_skipped_test.go @@ -0,0 +1,18 @@ 
+//go:build !ledger_suite +// +build !ledger_suite + +package keys + +import "testing" + +func TestCreateLedgerUnsupportedAlgo(t *testing.T) { + t.Parallel() + + t.Skip("this test needs to be run with the `ledger_suite` tag enabled") +} + +func TestCreateLedger(t *testing.T) { + t.Parallel() + + t.Skip("this test needs to be run with the `ledger_suite` tag enabled") +} diff --git a/tm2/pkg/crypto/keys/keybase_ledger_test.go b/tm2/pkg/crypto/keys/keybase_ledger_test.go new file mode 100644 index 00000000000..0f2fca79f90 --- /dev/null +++ b/tm2/pkg/crypto/keys/keybase_ledger_test.go @@ -0,0 +1,43 @@ +//go:build ledger_suite +// +build ledger_suite + +package keys + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateLedgerUnsupportedAlgo(t *testing.T) { + t.Parallel() + + kb := NewInMemory() + _, err := kb.CreateLedger("some_account", Ed25519, "cosmos", 0, 1) + assert.Error(t, err) + assert.Equal(t, "unsupported signing algo: only secp256k1 is supported", err.Error()) +} + +func TestCreateLedger(t *testing.T) { + t.Parallel() + + kb := NewInMemory() + + // test_cover and test_unit will result in different answers + // test_cover does not compile some dependencies so ledger is disabled + // test_unit may add a ledger mock + // both cases are acceptable + _, err := kb.CreateLedger("some_account", Secp256k1, "cosmos", 3, 1) + require.NoError(t, err) + + // Check that restoring the key gets the same results + restoredKey, err := kb.GetByName("some_account") + assert.NotNil(t, restoredKey) + assert.Equal(t, "some_account", restoredKey.GetName()) + assert.Equal(t, TypeLedger, restoredKey.GetType()) + + path, err := restoredKey.GetPath() + assert.NoError(t, err) + assert.Equal(t, "44'/118'/3'/0/1", path.String()) +} diff --git a/tm2/pkg/crypto/keys/keybase_test.go b/tm2/pkg/crypto/keys/keybase_test.go index 0c43dbd8dc5..32cc8788b52 100644 --- a/tm2/pkg/crypto/keys/keybase_test.go +++ b/tm2/pkg/crypto/keys/keybase_test.go @@ -24,53 +24,6 @@ func TestCreateAccountInvalidMnemonic(t *testing.T) { assert.Equal(t, "invalid mnemonic", err.Error()) } -func TestCreateLedgerUnsupportedAlgo(t *testing.T) { - t.Parallel() - - kb := NewInMemory() - _, err := kb.CreateLedger("some_account", Ed25519, "cosmos", 0, 1) - assert.Error(t, err) - assert.Equal(t, "unsupported signing algo: only secp256k1 is supported", err.Error()) -} - -func TestCreateLedger(t *testing.T) { - t.Parallel() - - kb := NewInMemory() - - // test_cover and test_unit will result in different answers - // test_cover does not compile some dependencies so ledger is disabled - // test_unit may add a ledger mock - // both cases are acceptable - ledger, err := kb.CreateLedger("some_account", Secp256k1, "cosmos", 3, 1) - if err != nil { - assert.Error(t, err) - assert.Contains(t, err.Error(), "LedgerHID device (idx 0) not found.") - - assert.Nil(t, ledger) - t.Skip("ledger nano S: support for ledger devices is not available in this executable") - return - } - - // The mock is available, check that the address is correct - pubKey := ledger.GetPubKey() - pubs := crypto.PubKeyToBech32(pubKey) - assert.Equal(t, "cosmospub1addwnpepqdszcr95mrqqs8lw099aa9h8h906zmet22pmwe9vquzcgvnm93eqygufdlv", pubs) - - // Check that restoring the key gets the same results - restoredKey, err := kb.GetByName("some_account") - assert.NotNil(t, restoredKey) - assert.Equal(t, "some_account", restoredKey.GetName()) - assert.Equal(t, TypeLedger, restoredKey.GetType()) - pubKey = restoredKey.GetPubKey() - 
pubs = crypto.PubKeyToBech32(pubKey) - assert.Equal(t, "cosmospub1addwnpepqdszcr95mrqqs8lw099aa9h8h906zmet22pmwe9vquzcgvnm93eqygufdlv", pubs) - - path, err := restoredKey.GetPath() - assert.NoError(t, err) - assert.Equal(t, "44'/118'/3'/0/1", path.String()) -} - // TestKeyManagement makes sure we can manipulate these keys well func TestKeyManagement(t *testing.T) { t.Parallel() diff --git a/tm2/pkg/crypto/ledger/discover.go b/tm2/pkg/crypto/ledger/discover.go new file mode 100644 index 00000000000..d610b56635e --- /dev/null +++ b/tm2/pkg/crypto/ledger/discover.go @@ -0,0 +1,19 @@ +//go:build !ledger_suite +// +build !ledger_suite + +package ledger + +import ( + ledger_go "github.com/cosmos/ledger-cosmos-go" +) + +// discoverLedger defines a function to be invoked at runtime for discovering +// a connected Ledger device. +var discoverLedger discoverLedgerFn = func() (LedgerSECP256K1, error) { + device, err := ledger_go.FindLedgerCosmosUserApp() + if err != nil { + return nil, err + } + + return device, nil +} diff --git a/tm2/pkg/crypto/ledger/discover_mock.go b/tm2/pkg/crypto/ledger/discover_mock.go new file mode 100644 index 00000000000..1f5bdbafdf3 --- /dev/null +++ b/tm2/pkg/crypto/ledger/discover_mock.go @@ -0,0 +1,69 @@ +//go:build ledger_suite +// +build ledger_suite + +package ledger + +import ( + btcec "github.com/btcsuite/btcd/btcec/v2" + "github.com/gnolang/gno/tm2/pkg/crypto/secp256k1" +) + +// discoverLedger defines a function to be invoked at runtime for discovering +// a connected Ledger device. +var discoverLedger discoverLedgerFn = func() (LedgerSECP256K1, error) { + privateKey := secp256k1.GenPrivKey() + + _, pubKeyObject := btcec.PrivKeyFromBytes(privateKey[:]) + + return &MockLedger{ + GetAddressPubKeySECP256K1Fn: func(data []uint32, str string) ([]byte, string, error) { + return pubKeyObject.SerializeCompressed(), privateKey.PubKey().Address().String(), nil + }, + }, nil +} + +type ( + closeDelegate func() error + getPublicKeySECP256K1Delegate func([]uint32) ([]byte, error) + getAddressPubKeySECP256K1Delegate func([]uint32, string) ([]byte, string, error) + signSECP256K1Delegate func([]uint32, []byte, byte) ([]byte, error) +) + +type MockLedger struct { + CloseFn closeDelegate + GetPublicKeySECP256K1Fn getPublicKeySECP256K1Delegate + GetAddressPubKeySECP256K1Fn getAddressPubKeySECP256K1Delegate + SignSECP256K1Fn signSECP256K1Delegate +} + +func (m *MockLedger) Close() error { + if m.CloseFn != nil { + return m.CloseFn() + } + + return nil +} + +func (m *MockLedger) GetPublicKeySECP256K1(data []uint32) ([]byte, error) { + if m.GetPublicKeySECP256K1Fn != nil { + return m.GetPublicKeySECP256K1Fn(data) + } + + return nil, nil +} + +func (m *MockLedger) GetAddressPubKeySECP256K1(data []uint32, str string) ([]byte, string, error) { + if m.GetAddressPubKeySECP256K1Fn != nil { + return m.GetAddressPubKeySECP256K1Fn(data, str) + } + + return nil, "", nil +} + +func (m *MockLedger) SignSECP256K1(d1 []uint32, d2 []byte, d3 byte) ([]byte, error) { + if m.SignSECP256K1Fn != nil { + return m.SignSECP256K1Fn(d1, d2, d3) + } + + return nil, nil +} diff --git a/tm2/pkg/crypto/ledger/ledger_secp256k1.go b/tm2/pkg/crypto/ledger/ledger_secp256k1.go index f154dbf376c..56877b813a5 100644 --- a/tm2/pkg/crypto/ledger/ledger_secp256k1.go +++ b/tm2/pkg/crypto/ledger/ledger_secp256k1.go @@ -9,7 +9,6 @@ import ( "github.com/btcsuite/btcd/btcec/v2/ecdsa" secp "github.com/decred/dcrd/dcrec/secp256k1/v4" - ledger "github.com/cosmos/ledger-cosmos-go" "github.com/gnolang/gno/tm2/pkg/amino" 
"github.com/gnolang/gno/tm2/pkg/crypto" "github.com/gnolang/gno/tm2/pkg/crypto/hd" @@ -45,17 +44,6 @@ type ( } ) -// discoverLedger defines a function to be invoked at runtime for discovering -// a connected Ledger device. -var discoverLedger discoverLedgerFn = func() (LedgerSECP256K1, error) { - device, err := ledger.FindLedgerCosmosUserApp() - if err != nil { - return nil, err - } - - return device, nil -} - // NewPrivKeyLedgerSecp256k1Unsafe will generate a new key and store the public key for later use. // // This function is marked as unsafe as it will retrieve a pubkey without user verification. From 15ad7795db992e1f403a393cb43d9a7e63fe4044 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Milo=C5=A1=20=C5=BDivkovi=C4=87?= Date: Fri, 26 Apr 2024 15:44:45 +0200 Subject: [PATCH 6/6] fix: add support for a ws client & batch processing over ws (#1498) ## Description Let me start this PR description by explaining _what_ I wanted to accomplish, so you are not discouraged when reading the file changes. I wanted to: - create a WS client from outside the repository (just like we can create http clients) - have the WS client support batch requests - have the TM2 JSON-RPC server support batch requests / responses over WS - **not** have to rewrite core client / server logic and APIs It might seem odd, reading the 3rd point and thinking this is not supported. The truth is actually much more troubling. Our JSON-RPC server (and client!) implementations are not great. HTTP requests are handled and parsed in a completely different flow than WS requests, even though the output of both should be identical (just over different mediums). Lots of logic is coupled, making it hard to extract and simplify. I've broken the WS / HTTP implementation multiple times over the course of this PR, and all of the tests were _passing_, even though I've critically broken the module at the time. The client code for the JSON-RPC server requires a _response type_ (of course, for Amino result parsing) to be given at the moment of calling, which is not amazing considering our WS implementation is async, and doesn't have these response types in the result parsing context (these logic flows are handled by different threads). What I ended up doing: - added support for a WS client - this was a bigger effort than expected; I extracted and simplified the batching logic, but was still blocked by the lack of batch support in WS request handling - added batch support for the TM2 JSON-RPC server - I basically mirrored the HTTP batch request handling (hint that these should be identical flows) - **BREAKING: completely overhauled our JSON-RPC client libraries and actual instances (http / ws)**, for a couple of reasons: - I tried to add support for all previously mentioned items, but it was impossible with the architecture that was in place (`baseRPCClient`). The slightly tweaked API (for creating HTTP / WS clients, and using batches) is much simpler to use, and actually has error handling. - We didn't have nearly enough coverage and good tests for the functionality -- now we have a suite of E2E and unit tests that give confidence. I will start an effort in the near future for refactoring the JSON-RPC server code from the ground up in a subsequent PR, this time with specs and tests at the forefront. ### How to test out the websockets To test out the WS responses, you can use a tool like [websocat](https://github.com/vi/websocat). 1. start a local chain 2. run `websocat ws://127.0.0.1:26657/websocket` (this is the default URL) 3. 
send a batch request: ```shell [ { "id": 1, "jsonrpc": "2.0", "method": "status", "params": [] }, { "id": 2, "jsonrpc": "2.0", "method": "status", "params": [] } ] ``` ### How to test out the updated client code I created the following snippet for easily testing the functionality updated in this PR: - single HTTP / WS requests - batch HTTP / WS requests ```go package main import ( "context" "fmt" "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" ) func main() { // HTTP // fmt.Printf("\n--- HTTP CLIENT ---\n") // Open HTTP connection httpClient, err := client.NewHTTPClient("http://127.0.0.1:26657") if err != nil { panic("unable to start HTTP client") } // Get a single status status, err := httpClient.Status() if err != nil { fmt.Println("Unable to send single status (HTTP)!") panic(err) } fmt.Printf("\n\nHTTP - Single status: %v\n\n", status) // Get a batch of statuses httpBatch := httpClient.NewBatch() // Add 10 status requests for i := 0; i < 10; i++ { if err := httpBatch.Status(); err != nil { fmt.Println("Unable to add status request to HTTP batch!") panic(err) } } // Send the batch results, err := httpBatch.Send(context.Background()) if err != nil { fmt.Println("Unable to send HTTP batch!") panic(err) } for index, resultRaw := range results { result, ok := resultRaw.(*ctypes.ResultStatus) if !ok { panic("Invalid status type in batch response!") } fmt.Printf("\nStatus %d from batch: %v\n", index, result) } // WS // fmt.Printf("\n--- WS CLIENT ---\n") // Open WS connection wsClient, err := client.NewWSClient("ws://127.0.0.1:26657/websocket") if err != nil { panic("unable to start WS client") } // Get a single status status, err = wsClient.Status() if err != nil { fmt.Println("Unable to send single status (WS)!") panic(err) } fmt.Printf("\n\nWS - Single status: %v\n\n", status) // Get a batch of statuses wsBatch := wsClient.NewBatch() // Add 10 status requests for i := 0; i < 10; i++ { if err := wsBatch.Status(); err != nil { fmt.Println("Unable to add status request to WS batch!") panic(err) } } // Send the batch results, err = wsBatch.Send(context.Background()) if err != nil { fmt.Println("Unable to send WS batch!") panic(err) } for index, resultRaw := range results { result, ok := resultRaw.(*ctypes.ResultStatus) if !ok { panic("Invalid status type in batch response!") } fmt.Printf("\nStatus %d from batch: %v\n", index, result) } if err := wsClient.Close(); err != nil { fmt.Println("Unable to gracefully close WS client!") panic(err) } fmt.Printf("\n\nGreat success!\n\n") } ``` cc @dongwon8247
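
One more note on error handling with the new batch API. Below is a minimal sketch (based on the `RPCBatch.Send` implementation added in this PR) showing how partial batch failures surface: failed requests come back as `nil` entries in the results slice, in request order, and their errors are joined into the single returned error. The endpoint is just the default local node address.

```go
package main

import (
	"context"
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/client"
	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
)

func main() {
	// Open an HTTP connection (the WS client exposes the same batch API)
	httpClient, err := client.NewHTTPClient("http://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}

	// Queue a couple of different requests in a single batch
	batch := httpClient.NewBatch()

	if err := batch.Status(); err != nil {
		panic(err)
	}

	if err := batch.ABCIInfo(); err != nil {
		panic(err)
	}

	// Send the batch. The returned error is the join of the individual
	// request errors, so a non-nil error does not mean every request failed
	results, err := batch.Send(context.Background())
	if err != nil {
		fmt.Printf("some batch requests failed: %v\n", err)
	}

	// Failed requests show up as nil entries, in request order
	for index, raw := range results {
		if raw == nil {
			fmt.Printf("request %d returned an RPC error\n", index)

			continue
		}

		switch result := raw.(type) {
		case *ctypes.ResultStatus:
			fmt.Printf("request %d - status: %v\n", index, result)
		case *ctypes.ResultABCIInfo:
			fmt.Printf("request %d - ABCI info: %v\n", index, result)
		}
	}
}
```

The same pattern applies to a WS batch created via `wsClient.NewBatch()`.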
Contributors' checklist... - [x] Added new tests, or not needed, or not feasible - [x] Provided an example (e.g. screenshot) to aid review or the PR is self-explanatory - [x] Updated the official documentation or not needed - [x] No breaking changes were made, or a `BREAKING CHANGE: xxx` message was included in the description - [ ] Added references to related issues and PRs - [ ] Provided any useful hints for running manual tests - [ ] Added new benchmarks to [generated graphs](https://gnoland.github.io/benchmarks), if any. More info [here](https://github.com/gnolang/gno/blob/master/.benchmarks/README.md).
--- .github/workflows/docker.yml | 6 +- contribs/gnodev/go.mod | 1 + contribs/gnodev/go.sum | 2 + contribs/gnokeykc/go.mod | 1 + contribs/gnokeykc/go.sum | 4 +- docs/how-to-guides/connecting-from-go.md | 4 +- gno.land/pkg/gnoclient/example_test.go | 6 +- gno.land/pkg/gnoclient/integration_test.go | 28 +- gno.land/pkg/gnoweb/gnoweb.go | 6 +- gnovm/pkg/gnomod/fetch.go | 6 +- go.mod | 16 +- go.sum | 2 + tm2/pkg/bft/rpc/client/batch.go | 425 +++++++++ tm2/pkg/bft/rpc/client/batch_test.go | 515 +++++++++++ tm2/pkg/bft/rpc/client/client.go | 377 ++++++++ tm2/pkg/bft/rpc/client/client_test.go | 871 ++++++++++++++++++ tm2/pkg/bft/rpc/client/doc.go | 18 + tm2/pkg/bft/rpc/client/e2e_test.go | 454 +++++++++ tm2/pkg/bft/rpc/client/examples_test.go | 128 --- tm2/pkg/bft/rpc/client/helpers.go | 49 - tm2/pkg/bft/rpc/client/helpers_test.go | 87 -- tm2/pkg/bft/rpc/client/httpclient.go | 333 ------- tm2/pkg/bft/rpc/client/interface.go | 100 -- .../rpc/client/{localclient.go => local.go} | 0 tm2/pkg/bft/rpc/client/main_test.go | 28 - tm2/pkg/bft/rpc/client/mock/abci.go | 209 ----- tm2/pkg/bft/rpc/client/mock/abci_test.go | 191 ---- tm2/pkg/bft/rpc/client/mock/client.go | 153 --- tm2/pkg/bft/rpc/client/mock/status.go | 52 -- tm2/pkg/bft/rpc/client/mock/status_test.go | 48 - tm2/pkg/bft/rpc/client/mock_test.go | 43 + tm2/pkg/bft/rpc/client/options.go | 12 + tm2/pkg/bft/rpc/client/rpc_test.go | 532 ----------- tm2/pkg/bft/rpc/client/types.go | 79 ++ tm2/pkg/bft/rpc/config/config.go | 9 + tm2/pkg/bft/rpc/config/utils.go | 11 - tm2/pkg/bft/rpc/lib/client/args_test.go | 41 - tm2/pkg/bft/rpc/lib/client/batch/batch.go | 64 ++ .../bft/rpc/lib/client/batch/batch_test.go | 103 +++ tm2/pkg/bft/rpc/lib/client/batch/mock_test.go | 21 + tm2/pkg/bft/rpc/lib/client/client.go | 34 + tm2/pkg/bft/rpc/lib/client/http/client.go | 245 +++++ .../bft/rpc/lib/client/http/client_test.go | 216 +++++ tm2/pkg/bft/rpc/lib/client/http_client.go | 452 --------- .../bft/rpc/lib/client/http_client_test.go | 58 -- .../bft/rpc/lib/client/integration_test.go | 69 -- tm2/pkg/bft/rpc/lib/client/ws/client.go | 285 ++++++ tm2/pkg/bft/rpc/lib/client/ws/client_test.go | 302 ++++++ tm2/pkg/bft/rpc/lib/client/ws/options.go | 14 + tm2/pkg/bft/rpc/lib/client/ws/options_test.go | 38 + tm2/pkg/bft/rpc/lib/client/ws_client.go | 467 ---------- tm2/pkg/bft/rpc/lib/client/ws_client_test.go | 239 ----- tm2/pkg/bft/rpc/lib/rpc_test.go | 395 -------- tm2/pkg/bft/rpc/lib/server/handlers.go | 127 ++- tm2/pkg/bft/rpc/lib/server/handlers_test.go | 4 +- tm2/pkg/bft/rpc/lib/server/http_server.go | 2 +- tm2/pkg/bft/rpc/lib/test/data.json | 9 - tm2/pkg/bft/rpc/lib/test/integration_test.sh | 95 -- tm2/pkg/bft/rpc/lib/test/main.go | 42 - tm2/pkg/bft/rpc/lib/types/types.go | 150 ++- tm2/pkg/bft/rpc/lib/types/types_test.go | 183 ++-- tm2/pkg/bft/rpc/test/helpers.go | 148 --- tm2/pkg/crypto/keys/client/broadcast.go | 5 +- tm2/pkg/crypto/keys/client/query.go | 6 +- 64 files changed, 4460 insertions(+), 4160 deletions(-) create mode 100644 tm2/pkg/bft/rpc/client/batch.go create mode 100644 tm2/pkg/bft/rpc/client/batch_test.go create mode 100644 tm2/pkg/bft/rpc/client/client.go create mode 100644 tm2/pkg/bft/rpc/client/client_test.go create mode 100644 tm2/pkg/bft/rpc/client/doc.go create mode 100644 tm2/pkg/bft/rpc/client/e2e_test.go delete mode 100644 tm2/pkg/bft/rpc/client/examples_test.go delete mode 100644 tm2/pkg/bft/rpc/client/helpers.go delete mode 100644 tm2/pkg/bft/rpc/client/helpers_test.go delete mode 100644 tm2/pkg/bft/rpc/client/httpclient.go delete mode 100644 
tm2/pkg/bft/rpc/client/interface.go rename tm2/pkg/bft/rpc/client/{localclient.go => local.go} (100%) delete mode 100644 tm2/pkg/bft/rpc/client/main_test.go delete mode 100644 tm2/pkg/bft/rpc/client/mock/abci.go delete mode 100644 tm2/pkg/bft/rpc/client/mock/abci_test.go delete mode 100644 tm2/pkg/bft/rpc/client/mock/client.go delete mode 100644 tm2/pkg/bft/rpc/client/mock/status.go delete mode 100644 tm2/pkg/bft/rpc/client/mock/status_test.go create mode 100644 tm2/pkg/bft/rpc/client/mock_test.go create mode 100644 tm2/pkg/bft/rpc/client/options.go delete mode 100644 tm2/pkg/bft/rpc/client/rpc_test.go delete mode 100644 tm2/pkg/bft/rpc/config/utils.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/args_test.go create mode 100644 tm2/pkg/bft/rpc/lib/client/batch/batch.go create mode 100644 tm2/pkg/bft/rpc/lib/client/batch/batch_test.go create mode 100644 tm2/pkg/bft/rpc/lib/client/batch/mock_test.go create mode 100644 tm2/pkg/bft/rpc/lib/client/client.go create mode 100644 tm2/pkg/bft/rpc/lib/client/http/client.go create mode 100644 tm2/pkg/bft/rpc/lib/client/http/client_test.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/http_client.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/http_client_test.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/integration_test.go create mode 100644 tm2/pkg/bft/rpc/lib/client/ws/client.go create mode 100644 tm2/pkg/bft/rpc/lib/client/ws/client_test.go create mode 100644 tm2/pkg/bft/rpc/lib/client/ws/options.go create mode 100644 tm2/pkg/bft/rpc/lib/client/ws/options_test.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/ws_client.go delete mode 100644 tm2/pkg/bft/rpc/lib/client/ws_client_test.go delete mode 100644 tm2/pkg/bft/rpc/lib/rpc_test.go delete mode 100644 tm2/pkg/bft/rpc/lib/test/data.json delete mode 100755 tm2/pkg/bft/rpc/lib/test/integration_test.sh delete mode 100644 tm2/pkg/bft/rpc/lib/test/main.go delete mode 100644 tm2/pkg/bft/rpc/test/helpers.go diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0b4def650c0..9616f6c06dd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -34,7 +34,7 @@ jobs: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Build and push uses: docker/build-push-action@v3 with: @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - target: [gnoland-slim, gnokey-slim, gno-slim, gnofaucet-slim, gnoweb-slim] + target: [ gnoland-slim, gnokey-slim, gno-slim, gnofaucet-slim, gnoweb-slim ] steps: - name: Checkout uses: actions/checkout@v4 @@ -71,7 +71,7 @@ jobs: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Build and push uses: docker/build-push-action@v3 with: diff --git a/contribs/gnodev/go.mod b/contribs/gnodev/go.mod index 8b66f72d288..df5236bc4a3 100644 --- a/contribs/gnodev/go.mod +++ b/contribs/gnodev/go.mod @@ -49,6 +49,7 @@ require ( github.com/rivo/uniseg v0.4.3 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.10.1 // indirect + github.com/rs/xid v1.5.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect diff --git a/contribs/gnodev/go.sum b/contribs/gnodev/go.sum index 408ca3d5203..dc5528a4be8 100644 --- a/contribs/gnodev/go.sum +++ b/contribs/gnodev/go.sum @@ -154,6 +154,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal 
v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= diff --git a/contribs/gnokeykc/go.mod b/contribs/gnokeykc/go.mod index c0b4a874576..d368402a3c3 100644 --- a/contribs/gnokeykc/go.mod +++ b/contribs/gnokeykc/go.mod @@ -31,6 +31,7 @@ require ( github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/xid v1.5.0 // indirect github.com/stretchr/testify v1.9.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/zondax/hid v0.9.2 // indirect diff --git a/contribs/gnokeykc/go.sum b/contribs/gnokeykc/go.sum index 8416528e4a7..d7bda688d4f 100644 --- a/contribs/gnokeykc/go.sum +++ b/contribs/gnokeykc/go.sum @@ -120,8 +120,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= diff --git a/docs/how-to-guides/connecting-from-go.md b/docs/how-to-guides/connecting-from-go.md index d1cdd324683..1fd47122371 100644 --- a/docs/how-to-guides/connecting-from-go.md +++ b/docs/how-to-guides/connecting-from-go.md @@ -109,7 +109,7 @@ A few things to note: You can initialize the RPC Client used to connect to the Gno.land network with the following line: ```go -rpc := rpcclient.NewHTTP("", "") +rpc := rpcclient.NewHTTP("") ``` A list of Gno.land network endpoints & chain IDs can be found in the [Gno RPC @@ -138,7 +138,7 @@ func main() { } // Initialize the RPC client - rpc := rpcclient.NewHTTP("", "") + rpc := rpcclient.NewHTTP("") // Initialize the gnoclient client := gnoclient.Client{ diff --git a/gno.land/pkg/gnoclient/example_test.go b/gno.land/pkg/gnoclient/example_test.go index 08c3bf19066..1ac3cf17cb0 100644 --- a/gno.land/pkg/gnoclient/example_test.go +++ b/gno.land/pkg/gnoclient/example_test.go @@ -16,7 +16,7 @@ func Example_withDisk() { } remote := "127.0.0.1:26657" - rpcClient := rpcclient.NewHTTP(remote, "/websocket") + rpcClient, _ := rpcclient.NewHTTPClient(remote) client := gnoclient.Client{ Signer: signer, @@ -35,7 +35,7 @@ func Example_withInMemCrypto() { signer, _ := gnoclient.SignerFromBip39(mnemo, chainID, 
bip39Passphrase, account, index) remote := "127.0.0.1:26657" - rpcClient := rpcclient.NewHTTP(remote, "/websocket") + rpcClient, _ := rpcclient.NewHTTPClient(remote) client := gnoclient.Client{ Signer: signer, @@ -47,7 +47,7 @@ func Example_withInMemCrypto() { // Example_readOnly demonstrates how to initialize a read-only gnoclient, which can only query. func Example_readOnly() { remote := "127.0.0.1:26657" - rpcClient := rpcclient.NewHTTP(remote, "/websocket") + rpcClient, _ := rpcclient.NewHTTPClient(remote) client := gnoclient.Client{ RPCClient: rpcClient, diff --git a/gno.land/pkg/gnoclient/integration_test.go b/gno.land/pkg/gnoclient/integration_test.go index 3244b32af3f..ace9022e35d 100644 --- a/gno.land/pkg/gnoclient/integration_test.go +++ b/gno.land/pkg/gnoclient/integration_test.go @@ -25,7 +25,8 @@ func TestCallSingle_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) // Setup Client client := Client{ @@ -68,7 +69,8 @@ func TestCallMultiple_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) // Setup Client client := Client{ @@ -119,7 +121,8 @@ func TestSendSingle_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) // Setup Client client := Client{ @@ -167,7 +170,8 @@ func TestSendMultiple_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) // Setup Client client := Client{ @@ -223,7 +227,8 @@ func TestRunSingle_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) client := Client{ Signer: signer, @@ -281,7 +286,8 @@ func TestRunMultiple_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) client := Client{ Signer: signer, @@ -361,7 +367,8 @@ func TestAddPackageSingle_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + require.NoError(t, err) // Setup Client client := Client{ @@ -404,7 +411,7 @@ func Echo(str string) string { } // Execute AddPackage - _, err := client.AddPackage(baseCfg, msg) + _, err = client.AddPackage(baseCfg, msg) assert.Nil(t, err) // Check for deployed file on the node @@ -429,7 +436,8 @@ func TestAddPackageMultiple_Integration(t *testing.T) { // Init Signer & RPCClient signer := newInMemorySigner(t, "tendermint_test") - rpcClient := rpcclient.NewHTTP(remoteAddr, "/websocket") + rpcClient, err := rpcclient.NewHTTPClient(remoteAddr) + 
require.NoError(t, err) // Setup Client client := Client{ @@ -495,7 +503,7 @@ func Hello(str string) string { } // Execute AddPackage - _, err := client.AddPackage(baseCfg, msg1, msg2) + _, err = client.AddPackage(baseCfg, msg1, msg2) assert.Nil(t, err) // Check Package #1 diff --git a/gno.land/pkg/gnoweb/gnoweb.go b/gno.land/pkg/gnoweb/gnoweb.go index 4854ed4791e..13c9f8ac2de 100644 --- a/gno.land/pkg/gnoweb/gnoweb.go +++ b/gno.land/pkg/gnoweb/gnoweb.go @@ -421,7 +421,11 @@ func makeRequest(log *slog.Logger, cfg *Config, qpath string, data []byte) (res // Prove: false, XXX } remote := cfg.RemoteAddr - cli := client.NewHTTP(remote, "/websocket") + cli, err := client.NewHTTPClient(remote) + if err != nil { + return nil, fmt.Errorf("unable to create HTTP client, %w", err) + } + qres, err := cli.ABCIQueryWithOptions( qpath, data, opts2) if err != nil { diff --git a/gnovm/pkg/gnomod/fetch.go b/gnovm/pkg/gnomod/fetch.go index 6c2b1a63121..24aaac2f9d4 100644 --- a/gnovm/pkg/gnomod/fetch.go +++ b/gnovm/pkg/gnomod/fetch.go @@ -12,7 +12,11 @@ func queryChain(remote string, qpath string, data []byte) (res *abci.ResponseQue // Height: height, XXX // Prove: false, XXX } - cli := client.NewHTTP(remote, "/websocket") + cli, err := client.NewHTTPClient(remote) + if err != nil { + return nil, err + } + qres, err := cli.ABCIQueryWithOptions(qpath, data, opts2) if err != nil { return nil, err diff --git a/go.mod b/go.mod index 73d8eb442c1..0ad00bb21de 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21 require ( dario.cat/mergo v1.0.0 + github.com/btcsuite/btcd/btcec/v2 v2.3.3 github.com/btcsuite/btcd/btcutil v1.1.5 github.com/cockroachdb/apd/v3 v3.2.1 github.com/cosmos/ledger-cosmos-go v0.13.3 @@ -26,6 +27,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 github.com/rogpeppe/go-internal v1.12.0 github.com/rs/cors v1.10.1 + github.com/rs/xid v1.5.0 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 go.etcd.io/bbolt v1.3.9 @@ -49,29 +51,25 @@ require ( require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/gdamore/encoding v1.0.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect - go.opentelemetry.io/otel/trace v1.25.0 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect -) - -require ( - github.com/btcsuite/btcd/btcec/v2 v2.3.3 - github.com/gdamore/encoding v1.0.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/sessions v1.2.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/rivo/uniseg v0.4.3 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect + go.opentelemetry.io/proto/otlp v1.1.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/grpc v1.63.0 // indirect ) diff --git a/go.sum b/go.sum index 
17fcdbe266c..0b1d1b203f2 100644 --- a/go.sum +++ b/go.sum @@ -148,6 +148,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= diff --git a/tm2/pkg/bft/rpc/client/batch.go b/tm2/pkg/bft/rpc/client/batch.go new file mode 100644 index 00000000000..9cee83b0f62 --- /dev/null +++ b/tm2/pkg/bft/rpc/client/batch.go @@ -0,0 +1,425 @@ +package client + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/gnolang/gno/tm2/pkg/amino" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" + rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +var errEmptyBatch = errors.New("RPC batch is empty") + +type RPCBatch struct { + batch rpcclient.Batch + + // resultMap maps the request ID -> result Amino type + // Why? + // There is a weird quirk in this RPC system where request results + // are marshalled into Amino JSON, before being handed off to the client. + // The client, of course, needs to unmarshal the Amino JSON-encoded response result + // back into a concrete type. + // Since working with an RPC batch is asynchronous + // (requests are added at any time, but results are retrieved when the batch is sent) + // there needs to be a record of what specific type the result needs to be Amino unmarshalled to + resultMap map[string]any + + mux sync.RWMutex +} + +func (b *RPCBatch) Count() int { + b.mux.RLock() + defer b.mux.RUnlock() + + return b.batch.Count() +} + +func (b *RPCBatch) Clear() int { + b.mux.Lock() + defer b.mux.Unlock() + + return b.batch.Clear() +} + +func (b *RPCBatch) Send(ctx context.Context) ([]any, error) { + b.mux.Lock() + defer b.mux.Unlock() + + // Save the initial batch size + batchSize := b.batch.Count() + + // Sanity check for not sending empty batches + if batchSize == 0 { + return nil, errEmptyBatch + } + + // Send the batch + responses, err := b.batch.Send(ctx) + if err != nil { + return nil, fmt.Errorf("unable to send RPC batch, %w", err) + } + + var ( + results = make([]any, 0, batchSize) + errs = make([]error, 0, batchSize) + ) + + // Parse the response results + for _, response := range responses { + // Check the error + if response.Error != nil { + errs = append(errs, response.Error) + results = append(results, nil) + + continue + } + + // Get the result type from the result map + result, exists := b.resultMap[response.ID.String()] + if !exists { + return nil, fmt.Errorf("unexpected response with ID %s", response.ID) + } + + // Amino JSON-unmarshal the response result + if err := amino.UnmarshalJSON(response.Result, result); err != nil { + return nil, fmt.Errorf("unable to parse response result, %w", err) + } + + results = append(results, result) + } + + return results, errors.Join(errs...) 
+} + +func (b *RPCBatch) addRequest(request rpctypes.RPCRequest, result any) { + b.mux.Lock() + defer b.mux.Unlock() + + // Save the result type + b.resultMap[request.ID.String()] = result + + // Add the request to the batch + b.batch.AddRequest(request) +} + +func (b *RPCBatch) Status() error { + // Prepare the RPC request + request, err := newRequest( + statusMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultStatus{}) + + return nil +} + +func (b *RPCBatch) ABCIInfo() error { + // Prepare the RPC request + request, err := newRequest( + abciInfoMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultABCIInfo{}) + + return nil +} + +func (b *RPCBatch) ABCIQuery(path string, data []byte) error { + return b.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (b *RPCBatch) ABCIQueryWithOptions(path string, data []byte, opts ABCIQueryOptions) error { + // Prepare the RPC request + request, err := newRequest( + abciQueryMethod, + map[string]any{ + "path": path, + "data": data, + "height": opts.Height, + "prove": opts.Prove, + }, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultABCIQuery{}) + + return nil +} + +func (b *RPCBatch) BroadcastTxCommit(tx types.Tx) error { + // Prepare the RPC request + request, err := newRequest( + broadcastTxCommitMethod, + map[string]any{"tx": tx}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultBroadcastTxCommit{}) + + return nil +} + +func (b *RPCBatch) BroadcastTxAsync(tx types.Tx) error { + return b.broadcastTX(broadcastTxAsyncMethod, tx) +} + +func (b *RPCBatch) BroadcastTxSync(tx types.Tx) error { + return b.broadcastTX(broadcastTxSyncMethod, tx) +} + +func (b *RPCBatch) broadcastTX(route string, tx types.Tx) error { + // Prepare the RPC request + request, err := newRequest( + route, + map[string]any{"tx": tx}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultBroadcastTx{}) + + return nil +} + +func (b *RPCBatch) UnconfirmedTxs(limit int) error { + // Prepare the RPC request + request, err := newRequest( + unconfirmedTxsMethod, + map[string]any{"limit": limit}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultUnconfirmedTxs{}) + + return nil +} + +func (b *RPCBatch) NumUnconfirmedTxs() error { + // Prepare the RPC request + request, err := newRequest( + numUnconfirmedTxsMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultUnconfirmedTxs{}) + + return nil +} + +func (b *RPCBatch) NetInfo() error { + // Prepare the RPC request + request, err := newRequest( + netInfoMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultNetInfo{}) + + return nil +} + +func (b *RPCBatch) DumpConsensusState() error { + // Prepare the RPC request + request, err := newRequest( + dumpConsensusStateMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultDumpConsensusState{}) + + return nil +} + +func 
(b *RPCBatch) ConsensusState() error { + // Prepare the RPC request + request, err := newRequest( + consensusStateMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultConsensusState{}) + + return nil +} + +func (b *RPCBatch) ConsensusParams(height *int64) error { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + // Prepare the RPC request + request, err := newRequest( + consensusParamsMethod, + params, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultConsensusParams{}) + + return nil +} + +func (b *RPCBatch) Health() error { + // Prepare the RPC request + request, err := newRequest( + healthMethod, + map[string]any{}, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultHealth{}) + + return nil +} + +func (b *RPCBatch) BlockchainInfo(minHeight, maxHeight int64) error { + // Prepare the RPC request + request, err := newRequest( + blockchainMethod, + map[string]any{ + "minHeight": minHeight, + "maxHeight": maxHeight, + }, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultBlockchainInfo{}) + + return nil +} + +func (b *RPCBatch) Genesis() error { + // Prepare the RPC request + request, err := newRequest(genesisMethod, map[string]any{}) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultGenesis{}) + + return nil +} + +func (b *RPCBatch) Block(height *int64) error { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + // Prepare the RPC request + request, err := newRequest(blockMethod, params) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultBlock{}) + + return nil +} + +func (b *RPCBatch) BlockResults(height *int64) error { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + // Prepare the RPC request + request, err := newRequest(blockResultsMethod, params) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultBlockResults{}) + + return nil +} + +func (b *RPCBatch) Commit(height *int64) error { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + // Prepare the RPC request + request, err := newRequest(commitMethod, params) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultCommit{}) + + return nil +} + +func (b *RPCBatch) Tx(hash []byte) error { + // Prepare the RPC request + request, err := newRequest( + txMethod, + map[string]interface{}{ + "hash": hash, + }, + ) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultTx{}) + + return nil +} + +func (b *RPCBatch) Validators(height *int64) error { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + // Prepare the RPC request + request, err := newRequest(validatorsMethod, params) + if err != nil { + return fmt.Errorf("unable to create request, %w", err) + } + + b.addRequest(request, &ctypes.ResultValidators{}) + + return nil +} diff --git a/tm2/pkg/bft/rpc/client/batch_test.go b/tm2/pkg/bft/rpc/client/batch_test.go new file mode 100644 
index 00000000000..52930e5c372 --- /dev/null +++ b/tm2/pkg/bft/rpc/client/batch_test.go @@ -0,0 +1,515 @@ +package client + +import ( + "context" + "testing" + + "github.com/gnolang/gno/tm2/pkg/amino" + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// generateMockBatchClient generates a common +// mock batch handling client +func generateMockBatchClient( + t *testing.T, + method string, + expectedRequests int, + commonResult any, +) *mockClient { + t.Helper() + + return &mockClient{ + sendBatchFn: func(_ context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + require.Len(t, requests, expectedRequests) + + responses := make(types.RPCResponses, len(requests)) + + for index, request := range requests { + require.Equal(t, "2.0", request.JSONRPC) + require.NotEmpty(t, request.ID) + require.Equal(t, method, request.Method) + + result, err := amino.MarshalJSON(commonResult) + require.NoError(t, err) + + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + Result: result, + Error: nil, + } + + responses[index] = response + } + + return responses, nil + }, + } +} + +func TestRPCBatch_Count(t *testing.T) { + t.Parallel() + + var ( + c = NewRPCClient(&mockClient{}) + batch = c.NewBatch() + ) + + // Make sure the batch is initially empty + assert.Equal(t, 0, batch.Count()) + + // Add a dummy request + require.NoError(t, batch.Status()) + + // Make sure the request is enqueued + assert.Equal(t, 1, batch.Count()) +} + +func TestRPCBatch_Clear(t *testing.T) { + t.Parallel() + + var ( + c = NewRPCClient(&mockClient{}) + batch = c.NewBatch() + ) + + // Add a dummy request + require.NoError(t, batch.Status()) + + // Make sure the request is enqueued + assert.Equal(t, 1, batch.Count()) + + // Clear the batch + assert.Equal(t, 1, batch.Clear()) + + // Make sure no request is enqueued + assert.Equal(t, 0, batch.Count()) +} + +func TestRPCBatch_Send(t *testing.T) { + t.Parallel() + + t.Run("empty batch", func(t *testing.T) { + t.Parallel() + + var ( + c = NewRPCClient(&mockClient{}) + batch = c.NewBatch() + ) + + res, err := batch.Send(context.Background()) + + assert.ErrorIs(t, err, errEmptyBatch) + assert.Nil(t, res) + }) + + t.Run("valid batch", func(t *testing.T) { + t.Parallel() + + var ( + numRequests = 10 + expectedStatus = &ctypes.ResultStatus{ + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + } + + mockClient = generateMockBatchClient(t, statusMethod, 10, expectedStatus) + + c = NewRPCClient(mockClient) + batch = c.NewBatch() + ) + + // Enqueue the requests + for i := 0; i < numRequests; i++ { + require.NoError(t, batch.Status()) + } + + // Send the batch + results, err := batch.Send(context.Background()) + require.NoError(t, err) + + // Validate the results + assert.Len(t, results, numRequests) + + for _, result := range results { + castResult, ok := result.(*ctypes.ResultStatus) + require.True(t, ok) + + assert.Equal(t, expectedStatus, castResult) + } + }) +} + +func TestRPCBatch_Endpoints(t *testing.T) { + t.Parallel() + + testTable := []struct { + method string + expectedResult any + batchCallback func(*RPCBatch) + extractCallback func(any) any + }{ + { + statusMethod, + &ctypes.ResultStatus{ + 
NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Status()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultStatus) + require.True(t, ok) + + return castResult + }, + }, + { + abciInfoMethod, + &ctypes.ResultABCIInfo{ + Response: abci.ResponseInfo{ + LastBlockAppHash: []byte("dummy"), + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.ABCIInfo()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultABCIInfo) + require.True(t, ok) + + return castResult + }, + }, + { + abciQueryMethod, + &ctypes.ResultABCIQuery{ + Response: abci.ResponseQuery{ + Value: []byte("dummy"), + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.ABCIQuery("path", []byte("dummy"))) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultABCIQuery) + require.True(t, ok) + + return castResult + }, + }, + { + broadcastTxCommitMethod, + &ctypes.ResultBroadcastTxCommit{ + Hash: []byte("dummy"), + }, + func(batch *RPCBatch) { + require.NoError(t, batch.BroadcastTxCommit([]byte("dummy"))) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + + return castResult + }, + }, + { + broadcastTxAsyncMethod, + &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + }, + func(batch *RPCBatch) { + require.NoError(t, batch.BroadcastTxAsync([]byte("dummy"))) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + return castResult + }, + }, + { + broadcastTxSyncMethod, + &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + }, + func(batch *RPCBatch) { + require.NoError(t, batch.BroadcastTxSync([]byte("dummy"))) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + return castResult + }, + }, + { + unconfirmedTxsMethod, + &ctypes.ResultUnconfirmedTxs{ + Count: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.UnconfirmedTxs(0)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + return castResult + }, + }, + { + numUnconfirmedTxsMethod, + &ctypes.ResultUnconfirmedTxs{ + Count: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.NumUnconfirmedTxs()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + return castResult + }, + }, + { + netInfoMethod, + &ctypes.ResultNetInfo{ + NPeers: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.NetInfo()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultNetInfo) + require.True(t, ok) + + return castResult + }, + }, + { + dumpConsensusStateMethod, + &ctypes.ResultDumpConsensusState{ + RoundState: &cstypes.RoundState{ + Round: 10, + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.DumpConsensusState()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultDumpConsensusState) + require.True(t, ok) + + return castResult + }, + }, + { + consensusStateMethod, + &ctypes.ResultConsensusState{ + RoundState: cstypes.RoundStateSimple{ + ProposalBlockHash: []byte("dummy"), + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.ConsensusState()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultConsensusState) + require.True(t, ok) + + return castResult + }, + }, + { + consensusParamsMethod, + &ctypes.ResultConsensusParams{ + BlockHeight: 10, + }, + func(batch *RPCBatch) { 
+ require.NoError(t, batch.ConsensusParams(nil)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultConsensusParams) + require.True(t, ok) + + return castResult + }, + }, + { + healthMethod, + &ctypes.ResultHealth{}, + func(batch *RPCBatch) { + require.NoError(t, batch.Health()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultHealth) + require.True(t, ok) + + return castResult + }, + }, + { + blockchainMethod, + &ctypes.ResultBlockchainInfo{ + LastHeight: 100, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.BlockchainInfo(0, 0)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBlockchainInfo) + require.True(t, ok) + + return castResult + }, + }, + { + genesisMethod, + &ctypes.ResultGenesis{ + Genesis: &bfttypes.GenesisDoc{ + ChainID: "dummy", + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Genesis()) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultGenesis) + require.True(t, ok) + + return castResult + }, + }, + { + blockMethod, + &ctypes.ResultBlock{ + BlockMeta: &bfttypes.BlockMeta{ + Header: bfttypes.Header{ + Height: 10, + }, + }, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Block(nil)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBlock) + require.True(t, ok) + + return castResult + }, + }, + { + blockResultsMethod, + &ctypes.ResultBlockResults{ + Height: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.BlockResults(nil)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultBlockResults) + require.True(t, ok) + + return castResult + }, + }, + { + commitMethod, + &ctypes.ResultCommit{ + CanonicalCommit: true, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Commit(nil)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultCommit) + require.True(t, ok) + + return castResult + }, + }, + { + txMethod, + &ctypes.ResultTx{ + Hash: []byte("tx hash"), + Height: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Tx([]byte("tx hash"))) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultTx) + require.True(t, ok) + + return castResult + }, + }, + { + validatorsMethod, + &ctypes.ResultValidators{ + BlockHeight: 10, + }, + func(batch *RPCBatch) { + require.NoError(t, batch.Validators(nil)) + }, + func(result any) any { + castResult, ok := result.(*ctypes.ResultValidators) + require.True(t, ok) + + return castResult + }, + }, + } + + for _, testCase := range testTable { + testCase := testCase + + t.Run(testCase.method, func(t *testing.T) { + t.Parallel() + + var ( + numRequests = 10 + mockClient = generateMockBatchClient( + t, + testCase.method, + numRequests, + testCase.expectedResult, + ) + + c = NewRPCClient(mockClient) + batch = c.NewBatch() + ) + + // Enqueue the requests + for i := 0; i < numRequests; i++ { + testCase.batchCallback(batch) + } + + // Send the batch + results, err := batch.Send(context.Background()) + require.NoError(t, err) + + // Validate the results + assert.Len(t, results, numRequests) + + for _, result := range results { + castResult := testCase.extractCallback(result) + + assert.Equal(t, testCase.expectedResult, castResult) + } + }) + } +} diff --git a/tm2/pkg/bft/rpc/client/client.go b/tm2/pkg/bft/rpc/client/client.go new file mode 100644 index 00000000000..e7c7d578ef3 --- /dev/null +++ b/tm2/pkg/bft/rpc/client/client.go @@ -0,0 +1,377 @@ +package client + +import ( + "context" + "fmt" + "time" + + 
"github.com/gnolang/gno/tm2/pkg/amino" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/batch" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/http" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/ws" + rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/rs/xid" +) + +const defaultTimeout = 60 * time.Second + +const ( + statusMethod = "status" + abciInfoMethod = "abci_info" + abciQueryMethod = "abci_query" + broadcastTxCommitMethod = "broadcast_tx_commit" + broadcastTxAsyncMethod = "broadcast_tx_async" + broadcastTxSyncMethod = "broadcast_tx_sync" + unconfirmedTxsMethod = "unconfirmed_txs" + numUnconfirmedTxsMethod = "num_unconfirmed_txs" + netInfoMethod = "net_info" + dumpConsensusStateMethod = "dump_consensus_state" + consensusStateMethod = "consensus_state" + consensusParamsMethod = "consensus_params" + healthMethod = "health" + blockchainMethod = "blockchain" + genesisMethod = "genesis" + blockMethod = "block" + blockResultsMethod = "block_results" + commitMethod = "commit" + txMethod = "tx" + validatorsMethod = "validators" +) + +// RPCClient encompasses common RPC client methods +type RPCClient struct { + requestTimeout time.Duration + + caller rpcclient.Client +} + +// NewRPCClient creates a new RPC client instance with the given caller +func NewRPCClient(caller rpcclient.Client, opts ...Option) *RPCClient { + c := &RPCClient{ + requestTimeout: defaultTimeout, + caller: caller, + } + + for _, opt := range opts { + opt(c) + } + + return c +} + +// NewHTTPClient takes a remote endpoint in the form ://:, +// and returns an HTTP client that communicates with a Tendermint node over +// JSON RPC. +// +// Request batching is available for JSON RPC requests over HTTP, which conforms to +// the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See +// the example for more details +func NewHTTPClient(rpcURL string) (*RPCClient, error) { + httpClient, err := http.NewClient(rpcURL) + if err != nil { + return nil, err + } + + return NewRPCClient(httpClient), nil +} + +// NewWSClient takes a remote endpoint in the form ://:, +// and returns a WS client that communicates with a Tendermint node over +// WS connection. +// +// Request batching is available for JSON RPC requests over WS, which conforms to +// the JSON RPC specification (https://www.jsonrpc.org/specification#batch). 
See +// the example for more details +func NewWSClient(rpcURL string) (*RPCClient, error) { + wsClient, err := ws.NewClient(rpcURL) + if err != nil { + return nil, err + } + + return NewRPCClient(wsClient), nil +} + +// Close attempts to gracefully close the RPC client +func (c *RPCClient) Close() error { + return c.caller.Close() +} + +// NewBatch creates a new RPC batch +func (c *RPCClient) NewBatch() *RPCBatch { + return &RPCBatch{ + batch: batch.NewBatch(c.caller), + resultMap: make(map[string]any), + } +} + +func (c *RPCClient) Status() (*ctypes.ResultStatus, error) { + return sendRequestCommon[ctypes.ResultStatus]( + c.caller, + c.requestTimeout, + statusMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + return sendRequestCommon[ctypes.ResultABCIInfo]( + c.caller, + c.requestTimeout, + abciInfoMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (c *RPCClient) ABCIQueryWithOptions(path string, data []byte, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return sendRequestCommon[ctypes.ResultABCIQuery]( + c.caller, + c.requestTimeout, + abciQueryMethod, + map[string]any{ + "path": path, + "data": data, + "height": opts.Height, + "prove": opts.Prove, + }, + ) +} + +func (c *RPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return sendRequestCommon[ctypes.ResultBroadcastTxCommit]( + c.caller, + c.requestTimeout, + broadcastTxCommitMethod, + map[string]any{"tx": tx}, + ) +} + +func (c *RPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(broadcastTxAsyncMethod, tx) +} + +func (c *RPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(broadcastTxSyncMethod, tx) +} + +func (c *RPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return sendRequestCommon[ctypes.ResultBroadcastTx]( + c.caller, + c.requestTimeout, + route, + map[string]any{"tx": tx}, + ) +} + +func (c *RPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { + return sendRequestCommon[ctypes.ResultUnconfirmedTxs]( + c.caller, + c.requestTimeout, + unconfirmedTxsMethod, + map[string]any{"limit": limit}, + ) +} + +func (c *RPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { + return sendRequestCommon[ctypes.ResultUnconfirmedTxs]( + c.caller, + c.requestTimeout, + numUnconfirmedTxsMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) NetInfo() (*ctypes.ResultNetInfo, error) { + return sendRequestCommon[ctypes.ResultNetInfo]( + c.caller, + c.requestTimeout, + netInfoMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + return sendRequestCommon[ctypes.ResultDumpConsensusState]( + c.caller, + c.requestTimeout, + dumpConsensusStateMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) { + return sendRequestCommon[ctypes.ResultConsensusState]( + c.caller, + c.requestTimeout, + consensusStateMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + return sendRequestCommon[ctypes.ResultConsensusParams]( + c.caller, + 
c.requestTimeout, + consensusParamsMethod, + params, + ) +} + +func (c *RPCClient) Health() (*ctypes.ResultHealth, error) { + return sendRequestCommon[ctypes.ResultHealth]( + c.caller, + c.requestTimeout, + healthMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return sendRequestCommon[ctypes.ResultBlockchainInfo]( + c.caller, + c.requestTimeout, + blockchainMethod, + map[string]any{ + "minHeight": minHeight, + "maxHeight": maxHeight, + }, + ) +} + +func (c *RPCClient) Genesis() (*ctypes.ResultGenesis, error) { + return sendRequestCommon[ctypes.ResultGenesis]( + c.caller, + c.requestTimeout, + genesisMethod, + map[string]any{}, + ) +} + +func (c *RPCClient) Block(height *int64) (*ctypes.ResultBlock, error) { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + return sendRequestCommon[ctypes.ResultBlock]( + c.caller, + c.requestTimeout, + blockMethod, + params, + ) +} + +func (c *RPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + return sendRequestCommon[ctypes.ResultBlockResults]( + c.caller, + c.requestTimeout, + blockResultsMethod, + params, + ) +} + +func (c *RPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + return sendRequestCommon[ctypes.ResultCommit]( + c.caller, + c.requestTimeout, + commitMethod, + params, + ) +} + +func (c *RPCClient) Tx(hash []byte) (*ctypes.ResultTx, error) { + return sendRequestCommon[ctypes.ResultTx]( + c.caller, + c.requestTimeout, + txMethod, + map[string]interface{}{ + "hash": hash, + }, + ) +} + +func (c *RPCClient) Validators(height *int64) (*ctypes.ResultValidators, error) { + params := map[string]any{} + if height != nil { + params["height"] = height + } + + return sendRequestCommon[ctypes.ResultValidators]( + c.caller, + c.requestTimeout, + validatorsMethod, + params, + ) +} + +// newRequest creates a new request based on the method +// and given params +func newRequest(method string, params map[string]any) (rpctypes.RPCRequest, error) { + id := rpctypes.JSONRPCStringID(xid.New().String()) + + return rpctypes.MapToRequest(id, method, params) +} + +// sendRequestCommon is the common request creation, sending, and parsing middleware +func sendRequestCommon[T any]( + caller rpcclient.Client, + timeout time.Duration, + method string, + params map[string]any, +) (*T, error) { + // Prepare the RPC request + request, err := newRequest(method, params) + if err != nil { + return nil, err + } + + // Send the request + ctx, cancelFn := context.WithTimeout(context.Background(), timeout) + defer cancelFn() + + response, err := caller.SendRequest(ctx, request) + if err != nil { + return nil, fmt.Errorf("unable to call RPC method %s, %w", method, err) + } + + // Parse the response + if response.Error != nil { + return nil, response.Error + } + + // Unmarshal the RPC response + return unmarshalResponseBytes[T](response.Result) +} + +// unmarshalResponseBytes Amino JSON-unmarshals the RPC response data +func unmarshalResponseBytes[T any](responseBytes []byte) (*T, error) { + var result T + + // Amino JSON-unmarshal the RPC response data + if err := amino.UnmarshalJSON(responseBytes, &result); err != nil { + return nil, fmt.Errorf("unable to unmarshal response bytes, %w", err) + } + + return &result, nil +} diff --git 
a/tm2/pkg/bft/rpc/client/client_test.go b/tm2/pkg/bft/rpc/client/client_test.go new file mode 100644 index 00000000000..cb88c91fc5f --- /dev/null +++ b/tm2/pkg/bft/rpc/client/client_test.go @@ -0,0 +1,871 @@ +package client + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/gnolang/gno/tm2/pkg/amino" + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// generateMockRequestClient generates a single RPC request mock client +func generateMockRequestClient( + t *testing.T, + method string, + verifyParamsFn func(*testing.T, map[string]any), + responseData any, +) *mockClient { + t.Helper() + + return &mockClient{ + sendRequestFn: func( + _ context.Context, + request types.RPCRequest, + ) (*types.RPCResponse, error) { + // Validate the request + require.Equal(t, "2.0", request.JSONRPC) + require.NotNil(t, request.ID) + require.Equal(t, request.Method, method) + + // Validate the params + var params map[string]any + require.NoError(t, json.Unmarshal(request.Params, ¶ms)) + + verifyParamsFn(t, params) + + // Prepare the result + result, err := amino.MarshalJSON(responseData) + require.NoError(t, err) + + // Prepare the response + response := &types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + Result: result, + Error: nil, + } + + return response, nil + }, + } +} + +// generateMockRequestsClient generates a batch RPC request mock client +func generateMockRequestsClient( + t *testing.T, + method string, + verifyParamsFn func(*testing.T, map[string]any), + responseData []any, +) *mockClient { + t.Helper() + + return &mockClient{ + sendBatchFn: func( + _ context.Context, + requests types.RPCRequests, + ) (types.RPCResponses, error) { + responses := make(types.RPCResponses, 0, len(requests)) + + // Validate the requests + for index, r := range requests { + require.Equal(t, "2.0", r.JSONRPC) + require.NotNil(t, r.ID) + require.Equal(t, r.Method, method) + + // Validate the params + var params map[string]any + require.NoError(t, json.Unmarshal(r.Params, ¶ms)) + + verifyParamsFn(t, params) + + // Prepare the result + result, err := amino.MarshalJSON(responseData[index]) + require.NoError(t, err) + + // Prepare the response + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: r.ID, + Result: result, + Error: nil, + } + + responses = append(responses, response) + } + + return responses, nil + }, + } +} + +func TestRPCClient_Status(t *testing.T) { + t.Parallel() + + var ( + expectedStatus = &ctypes.ResultStatus{ + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + statusMethod, + verifyFn, + expectedStatus, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the status + status, err := c.Status() + require.NoError(t, err) + + assert.Equal(t, expectedStatus, status) +} + +func TestRPCClient_ABCIInfo(t *testing.T) { + t.Parallel() + + var ( + expectedInfo = &ctypes.ResultABCIInfo{ + Response: abci.ResponseInfo{ + LastBlockAppHash: []byte("dummy"), + }, + } + + verifyFn = func(t *testing.T, params 
map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + abciInfoMethod, + verifyFn, + expectedInfo, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the info + info, err := c.ABCIInfo() + require.NoError(t, err) + + assert.Equal(t, expectedInfo, info) +} + +func TestRPCClient_ABCIQuery(t *testing.T) { + t.Parallel() + + var ( + path = "path" + data = []byte("data") + opts = DefaultABCIQueryOptions + + expectedQuery = &ctypes.ResultABCIQuery{ + Response: abci.ResponseQuery{ + Value: []byte("dummy"), + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, path, params["path"]) + assert.Equal(t, base64.StdEncoding.EncodeToString(data), params["data"]) + assert.Equal(t, fmt.Sprintf("%d", opts.Height), params["height"]) + assert.Equal(t, opts.Prove, params["prove"]) + } + + mockClient = generateMockRequestClient( + t, + abciQueryMethod, + verifyFn, + expectedQuery, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the query + query, err := c.ABCIQuery(path, data) + require.NoError(t, err) + + assert.Equal(t, expectedQuery, query) +} + +func TestRPCClient_BroadcastTxCommit(t *testing.T) { + t.Parallel() + + var ( + tx = []byte("tx") + + expectedTxCommit = &ctypes.ResultBroadcastTxCommit{ + Hash: []byte("dummy"), + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + } + + mockClient = generateMockRequestClient( + t, + broadcastTxCommitMethod, + verifyFn, + expectedTxCommit, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the broadcast + txCommit, err := c.BroadcastTxCommit(tx) + require.NoError(t, err) + + assert.Equal(t, expectedTxCommit, txCommit) +} + +func TestRPCClient_BroadcastTxAsync(t *testing.T) { + t.Parallel() + + var ( + tx = []byte("tx") + + expectedTxBroadcast = &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + } + + mockClient = generateMockRequestClient( + t, + broadcastTxAsyncMethod, + verifyFn, + expectedTxBroadcast, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the broadcast + txAsync, err := c.BroadcastTxAsync(tx) + require.NoError(t, err) + + assert.Equal(t, expectedTxBroadcast, txAsync) +} + +func TestRPCClient_BroadcastTxSync(t *testing.T) { + t.Parallel() + + var ( + tx = []byte("tx") + + expectedTxBroadcast = &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + } + + mockClient = generateMockRequestClient( + t, + broadcastTxSyncMethod, + verifyFn, + expectedTxBroadcast, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the broadcast + txSync, err := c.BroadcastTxSync(tx) + require.NoError(t, err) + + assert.Equal(t, expectedTxBroadcast, txSync) +} + +func TestRPCClient_UnconfirmedTxs(t *testing.T) { + t.Parallel() + + var ( + limit = 10 + + expectedResult = &ctypes.ResultUnconfirmedTxs{ + Count: 10, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", limit), params["limit"]) + } + + mockClient = generateMockRequestClient( + t, + unconfirmedTxsMethod, + verifyFn, + expectedResult, + 
) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.UnconfirmedTxs(limit) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_NumUnconfirmedTxs(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultUnconfirmedTxs{ + Count: 10, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + numUnconfirmedTxsMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.NumUnconfirmedTxs() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_NetInfo(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultNetInfo{ + NPeers: 10, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + netInfoMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.NetInfo() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_DumpConsensusState(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultDumpConsensusState{ + RoundState: &cstypes.RoundState{ + Round: 10, + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + dumpConsensusStateMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.DumpConsensusState() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_ConsensusState(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultConsensusState{ + RoundState: cstypes.RoundStateSimple{ + ProposalBlockHash: []byte("dummy"), + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + consensusStateMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.ConsensusState() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_ConsensusParams(t *testing.T) { + t.Parallel() + + var ( + blockHeight = int64(10) + + expectedResult = &ctypes.ResultConsensusParams{ + BlockHeight: blockHeight, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", blockHeight), params["height"]) + } + + mockClient = generateMockRequestClient( + t, + consensusParamsMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.ConsensusParams(&blockHeight) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Health(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultHealth{} + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + healthMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + 
result, err := c.Health() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_BlockchainInfo(t *testing.T) { + t.Parallel() + + var ( + minHeight = int64(5) + maxHeight = int64(10) + + expectedResult = &ctypes.ResultBlockchainInfo{ + LastHeight: 100, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", minHeight), params["minHeight"]) + assert.Equal(t, fmt.Sprintf("%d", maxHeight), params["maxHeight"]) + } + + mockClient = generateMockRequestClient( + t, + blockchainMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.BlockchainInfo(minHeight, maxHeight) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Genesis(t *testing.T) { + t.Parallel() + + var ( + expectedResult = &ctypes.ResultGenesis{ + Genesis: &bfttypes.GenesisDoc{ + ChainID: "dummy", + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestClient( + t, + genesisMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.Genesis() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Block(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + + expectedResult = &ctypes.ResultBlock{ + BlockMeta: &bfttypes.BlockMeta{ + Header: bfttypes.Header{ + Height: height, + }, + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + } + + mockClient = generateMockRequestClient( + t, + blockMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.Block(&height) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_BlockResults(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + + expectedResult = &ctypes.ResultBlockResults{ + Height: height, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + } + + mockClient = generateMockRequestClient( + t, + blockResultsMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.BlockResults(&height) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Commit(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + + expectedResult = &ctypes.ResultCommit{ + CanonicalCommit: true, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + } + + mockClient = generateMockRequestClient( + t, + commitMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.Commit(&height) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Tx(t *testing.T) { + t.Parallel() + + var ( + hash = []byte("tx hash") + + expectedResult = &ctypes.ResultTx{ + Hash: hash, + Height: 10, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, base64.StdEncoding.EncodeToString(hash), 
params["hash"]) + } + + mockClient = generateMockRequestClient( + t, + txMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.Tx(hash) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Validators(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + + expectedResult = &ctypes.ResultValidators{ + BlockHeight: height, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + } + + mockClient = generateMockRequestClient( + t, + validatorsMethod, + verifyFn, + expectedResult, + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Get the result + result, err := c.Validators(&height) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) +} + +func TestRPCClient_Batch(t *testing.T) { + t.Parallel() + + convertResults := func(results []*ctypes.ResultStatus) []any { + res := make([]any, len(results)) + + for index, item := range results { + res[index] = item + } + + return res + } + + var ( + expectedStatuses = []*ctypes.ResultStatus{ + { + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + }, + { + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + }, + { + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + }, + } + + verifyFn = func(t *testing.T, params map[string]any) { + t.Helper() + + assert.Len(t, params, 0) + } + + mockClient = generateMockRequestsClient( + t, + statusMethod, + verifyFn, + convertResults(expectedStatuses), + ) + ) + + // Create the client + c := NewRPCClient(mockClient) + + // Create the batch + batch := c.NewBatch() + + require.NoError(t, batch.Status()) + require.NoError(t, batch.Status()) + require.NoError(t, batch.Status()) + + require.EqualValues(t, 3, batch.Count()) + + // Send the batch + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + results, err := batch.Send(ctx) + require.NoError(t, err) + + require.Len(t, results, len(expectedStatuses)) + + for index, result := range results { + castResult, ok := result.(*ctypes.ResultStatus) + require.True(t, ok) + + assert.Equal(t, expectedStatuses[index], castResult) + } +} diff --git a/tm2/pkg/bft/rpc/client/doc.go b/tm2/pkg/bft/rpc/client/doc.go new file mode 100644 index 00000000000..a243dea1046 --- /dev/null +++ b/tm2/pkg/bft/rpc/client/doc.go @@ -0,0 +1,18 @@ +// Package client provides a general purpose interface (Client) for connecting +// to a tendermint node, as well as higher-level functionality. +// +// The main implementation for production code is client.HTTP, which +// connects via http to the jsonrpc interface of the tendermint node. +// +// For connecting to a node running in the same process (eg. when +// compiling the abci app in the same process), you can use the client.Local +// implementation. +// +// For mocking out server responses during testing to see behavior for +// arbitrary return values, use the mock package. +// +// In addition to the Client interface, which should be used externally +// for maximum flexibility and testability, and two implementations, +// this package also provides helper functions that work on any Client +// implementation. 
+package client diff --git a/tm2/pkg/bft/rpc/client/e2e_test.go b/tm2/pkg/bft/rpc/client/e2e_test.go new file mode 100644 index 00000000000..08d4b9b735d --- /dev/null +++ b/tm2/pkg/bft/rpc/client/e2e_test.go @@ -0,0 +1,454 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/gnolang/gno/tm2/pkg/amino" + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// createTestServer creates a test RPC server +func createTestServer( + t *testing.T, + handler http.Handler, +) *httptest.Server { + t.Helper() + + s := httptest.NewServer(handler) + t.Cleanup(s.Close) + + return s +} + +// defaultHTTPHandler generates a default HTTP test handler +func defaultHTTPHandler( + t *testing.T, + method string, + responseResult any, +) http.HandlerFunc { + t.Helper() + + return func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, http.MethodPost, r.Method) + require.Equal(t, "application/json", r.Header.Get("content-type")) + + // Parse the message + var req types.RPCRequest + require.NoError(t, json.NewDecoder(r.Body).Decode(&req)) + + // Basic request validation + require.Equal(t, req.JSONRPC, "2.0") + require.Equal(t, req.Method, method) + + // Marshal the result data to Amino JSON + result, err := amino.MarshalJSON(responseResult) + require.NoError(t, err) + + // Send a response back + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: req.ID, + Result: result, + } + + // Marshal the response + marshalledResponse, err := json.Marshal(response) + require.NoError(t, err) + + _, err = w.Write(marshalledResponse) + require.NoError(t, err) + } +} + +// defaultWSHandler generates a default WS test handler +func defaultWSHandler( + t *testing.T, + method string, + responseResult any, +) http.HandlerFunc { + t.Helper() + + upgrader := websocket.Upgrader{} + + return func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + require.NoError(t, err) + + defer c.Close() + + for { + mt, message, err := c.ReadMessage() + if websocket.IsUnexpectedCloseError(err) { + return + } + + require.NoError(t, err) + + // Parse the message + var req types.RPCRequest + require.NoError(t, json.Unmarshal(message, &req)) + + // Basic request validation + require.Equal(t, req.JSONRPC, "2.0") + require.Equal(t, req.Method, method) + + // Marshal the result data to Amino JSON + result, err := amino.MarshalJSON(responseResult) + require.NoError(t, err) + + // Send a response back + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: req.ID, + Result: result, + } + + // Marshal the response + marshalledResponse, err := json.Marshal(response) + require.NoError(t, err) + + require.NoError(t, c.WriteMessage(mt, marshalledResponse)) + } + } +} + +type e2eTestCase struct { + name string + client *RPCClient +} + +// generateE2ETestCases generates RPC client test cases (HTTP / WS) +func generateE2ETestCases( + t *testing.T, + method string, + responseResult any, +) []e2eTestCase { + t.Helper() + + // Create the http client + httpServer := createTestServer(t, defaultHTTPHandler(t, method, responseResult)) + httpClient, err := 
NewHTTPClient(httpServer.URL) + require.NoError(t, err) + + // Create the WS client + wsServer := createTestServer(t, defaultWSHandler(t, method, responseResult)) + wsClient, err := NewWSClient("ws" + strings.TrimPrefix(wsServer.URL, "http")) + require.NoError(t, err) + + return []e2eTestCase{ + { + name: "http", + client: httpClient, + }, + { + name: "ws", + client: wsClient, + }, + } +} + +func TestRPCClient_E2E_Endpoints(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + expectedResult any + verifyFn func(*RPCClient, any) + }{ + { + statusMethod, + &ctypes.ResultStatus{ + NodeInfo: p2p.NodeInfo{ + Moniker: "dummy", + }, + }, + func(client *RPCClient, expectedResult any) { + status, err := client.Status() + require.NoError(t, err) + + assert.Equal(t, expectedResult, status) + }, + }, + { + abciInfoMethod, + &ctypes.ResultABCIInfo{ + Response: abci.ResponseInfo{ + LastBlockAppHash: []byte("dummy"), + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.ABCIInfo() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + abciQueryMethod, + &ctypes.ResultABCIQuery{ + Response: abci.ResponseQuery{ + Value: []byte("dummy"), + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.ABCIQuery("path", []byte("dummy")) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + broadcastTxCommitMethod, + &ctypes.ResultBroadcastTxCommit{ + Hash: []byte("dummy"), + }, + func(client *RPCClient, expectedResult any) { + result, err := client.BroadcastTxCommit([]byte("dummy")) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + broadcastTxAsyncMethod, + &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + }, + func(client *RPCClient, expectedResult any) { + result, err := client.BroadcastTxAsync([]byte("dummy")) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + broadcastTxSyncMethod, + &ctypes.ResultBroadcastTx{ + Hash: []byte("dummy"), + }, + func(client *RPCClient, expectedResult any) { + result, err := client.BroadcastTxSync([]byte("dummy")) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + unconfirmedTxsMethod, + &ctypes.ResultUnconfirmedTxs{ + Count: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.UnconfirmedTxs(0) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + numUnconfirmedTxsMethod, + &ctypes.ResultUnconfirmedTxs{ + Count: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.NumUnconfirmedTxs() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + netInfoMethod, + &ctypes.ResultNetInfo{ + NPeers: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.NetInfo() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + dumpConsensusStateMethod, + &ctypes.ResultDumpConsensusState{ + RoundState: &cstypes.RoundState{ + Round: 10, + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.DumpConsensusState() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + consensusStateMethod, + &ctypes.ResultConsensusState{ + RoundState: cstypes.RoundStateSimple{ + ProposalBlockHash: []byte("dummy"), + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.ConsensusState() + require.NoError(t, err) + + 
assert.Equal(t, expectedResult, result) + }, + }, + { + consensusParamsMethod, + &ctypes.ResultConsensusParams{ + BlockHeight: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.ConsensusParams(nil) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + healthMethod, + &ctypes.ResultHealth{}, + func(client *RPCClient, expectedResult any) { + result, err := client.Health() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + blockchainMethod, + &ctypes.ResultBlockchainInfo{ + LastHeight: 100, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.BlockchainInfo(0, 0) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + genesisMethod, + &ctypes.ResultGenesis{ + Genesis: &bfttypes.GenesisDoc{ + ChainID: "dummy", + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.Genesis() + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + blockMethod, + &ctypes.ResultBlock{ + BlockMeta: &bfttypes.BlockMeta{ + Header: bfttypes.Header{ + Height: 10, + }, + }, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.Block(nil) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + blockResultsMethod, + &ctypes.ResultBlockResults{ + Height: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.BlockResults(nil) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + commitMethod, + &ctypes.ResultCommit{ + CanonicalCommit: true, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.Commit(nil) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + txMethod, + &ctypes.ResultTx{ + Hash: []byte("tx hash"), + Height: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.Tx([]byte("tx hash")) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + { + validatorsMethod, + &ctypes.ResultValidators{ + BlockHeight: 10, + }, + func(client *RPCClient, expectedResult any) { + result, err := client.Validators(nil) + require.NoError(t, err) + + assert.Equal(t, expectedResult, result) + }, + }, + } + + for _, testCase := range testTable { + testCase := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + clientTable := generateE2ETestCases( + t, + testCase.name, + testCase.expectedResult, + ) + + for _, clientCase := range clientTable { + clientCase := clientCase + + t.Run(clientCase.name, func(t *testing.T) { + t.Parallel() + + defer func() { + require.NoError(t, clientCase.client.Close()) + }() + + testCase.verifyFn(clientCase.client, testCase.expectedResult) + }) + } + }) + } +} diff --git a/tm2/pkg/bft/rpc/client/examples_test.go b/tm2/pkg/bft/rpc/client/examples_test.go deleted file mode 100644 index 287a63164d2..00000000000 --- a/tm2/pkg/bft/rpc/client/examples_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package client_test - -import ( - "bytes" - "fmt" - - "github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctest "github.com/gnolang/gno/tm2/pkg/bft/rpc/test" -) - -func ExampleHTTP_simple() { - // Start a tendermint node (and kvstore) in the background to test against - app := kvstore.NewKVStoreApplication() - node := rpctest.StartTendermint(app, 
rpctest.SuppressStdout, rpctest.RecreateConfig) - defer rpctest.StopTendermint(node) - - // Create our RPC client - cfg, _ := rpctest.GetConfig() - rpcAddr := cfg.RPC.ListenAddress - c := client.NewHTTP(rpcAddr, "/websocket") - - // Create a transaction - k := []byte("name") - v := []byte("satoshi") - tx := append(k, append([]byte("="), v...)...) - - // Broadcast the transaction and wait for it to commit (rather use - // c.BroadcastTxSync though in production) - bres, err := c.BroadcastTxCommit(tx) - if err != nil { - panic(err) - } - if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { - panic("BroadcastTxCommit transaction failed") - } - - // Now try to fetch the value for the key - qres, err := c.ABCIQuery("/key", k) - if err != nil { - panic(err) - } - if qres.Response.IsErr() { - panic("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - panic("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - panic("returned value does not match sent value") - } - - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi -} - -func ExampleHTTP_batching() { - // Start a tendermint node (and kvstore) in the background to test against - app := kvstore.NewKVStoreApplication() - node := rpctest.StartTendermint(app, rpctest.RecreateConfig) - defer rpctest.StopTendermint(node) - - // Create our RPC client - cfg, _ := rpctest.GetConfig() - rpcAddr := cfg.RPC.ListenAddress - c := client.NewHTTP(rpcAddr, "/websocket") - - // Create our two transactions - k1 := []byte("firstName") - v1 := []byte("satoshi") - tx1 := append(k1, append([]byte("="), v1...)...) - - k2 := []byte("lastName") - v2 := []byte("nakamoto") - tx2 := append(k2, append([]byte("="), v2...)...) 
- - txs := [][]byte{tx1, tx2} - - // Create a new batch - batch := c.NewBatch() - - // Queue up our transactions - for _, tx := range txs { - if _, err := batch.BroadcastTxCommit(tx); err != nil { - panic(err) - } - } - - // Send the batch of 2 transactions - if _, err := batch.Send(); err != nil { - panic(err) - } - - // Now let's query for the original results as a batch - keys := [][]byte{k1, k2} - for _, key := range keys { - if _, err := batch.ABCIQuery("/key", key); err != nil { - panic(err) - } - } - - // Send the 2 queries and keep the results - results, err := batch.Send() - if err != nil { - panic(err) - } - - // Each result in the returned list is the deserialized result of each - // respective ABCIQuery response - for _, result := range results { - qr, ok := result.(*ctypes.ResultABCIQuery) - if !ok { - panic("invalid result type from ABCIQuery request") - } - fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) - } - - // Output: - // firstName = satoshi - // lastName = nakamoto -} diff --git a/tm2/pkg/bft/rpc/client/helpers.go b/tm2/pkg/bft/rpc/client/helpers.go deleted file mode 100644 index a3299909f82..00000000000 --- a/tm2/pkg/bft/rpc/client/helpers.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "time" - - "github.com/gnolang/gno/tm2/pkg/errors" -) - -// Waiter is informed of current height, decided whether to quit early -type Waiter func(delta int64) (abort error) - -// DefaultWaitStrategy is the standard backoff algorithm, -// but you can plug in another one -func DefaultWaitStrategy(delta int64) (abort error) { - if delta > 10 { - return errors.New("waiting for %d blocks... aborting", delta) - } else if delta > 0 { - // estimate of wait time.... - // wait half a second for the next block (in progress) - // plus one second for every full block - delay := time.Duration(delta-1)*time.Second + 500*time.Millisecond - time.Sleep(delay) - } - return nil -} - -// Wait for height will poll status at reasonable intervals until -// the block at the given height is available. 
-// -// If waiter is nil, we use DefaultWaitStrategy, but you can also -// provide your own implementation -func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { - if waiter == nil { - waiter = DefaultWaitStrategy - } - delta := int64(1) - for delta > 0 { - s, err := c.Status() - if err != nil { - return err - } - delta = h - s.SyncInfo.LatestBlockHeight - // wait for the time, or abort early - if err := waiter(delta); err != nil { - return err - } - } - return nil -} diff --git a/tm2/pkg/bft/rpc/client/helpers_test.go b/tm2/pkg/bft/rpc/client/helpers_test.go deleted file mode 100644 index 4d0b54c2358..00000000000 --- a/tm2/pkg/bft/rpc/client/helpers_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package client_test - -import ( - "errors" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client/mock" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - "github.com/gnolang/gno/tm2/pkg/random" -) - -func TestWaitForHeight(t *testing.T) { - t.Parallel() - - assert, require := assert.New(t), require.New(t) - - // test with error result - immediate failure - m := &mock.StatusMock{ - Call: mock.Call{ - Error: errors.New("bye"), - }, - } - r := mock.NewStatusRecorder(m) - - // connection failure always leads to error - err := client.WaitForHeight(r, 8, nil) - require.NotNil(err) - require.Equal("bye", err.Error()) - // we called status once to check - require.Equal(1, len(r.Calls)) - - // now set current block height to 10 - m.Call = mock.Call{ - Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, - } - - // we will not wait for more than 10 blocks - err = client.WaitForHeight(r, 40, nil) - require.NotNil(err) - require.True(strings.Contains(err.Error(), "aborting")) - // we called status once more to check - require.Equal(2, len(r.Calls)) - - // waiting for the past returns immediately - err = client.WaitForHeight(r, 5, nil) - require.Nil(err) - // we called status once more to check - require.Equal(3, len(r.Calls)) - - // since we can't update in a background goroutine (test --race) - // we use the callback to update the status height - myWaiter := func(delta int64) error { - // update the height for the next call - m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} - return client.DefaultWaitStrategy(delta) - } - - // we wait for a few blocks - err = client.WaitForHeight(r, 12, myWaiter) - require.Nil(err) - // we called status once to check - require.Equal(5, len(r.Calls)) - - pre := r.Calls[3] - require.Nil(pre.Error) - prer, ok := pre.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) - - post := r.Calls[4] - require.Nil(post.Error) - postr, ok := post.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) -} - -// MakeTxKV returns a text transaction, allong with expected key, value pair -func MakeTxKV() ([]byte, []byte, []byte) { - k := []byte(random.RandStr(8)) - v := []byte(random.RandStr(8)) - return k, v, append(k, append([]byte("="), v...)...) 
-} diff --git a/tm2/pkg/bft/rpc/client/httpclient.go b/tm2/pkg/bft/rpc/client/httpclient.go deleted file mode 100644 index 51d2e1c3fca..00000000000 --- a/tm2/pkg/bft/rpc/client/httpclient.go +++ /dev/null @@ -1,333 +0,0 @@ -package client - -import ( - "net/http" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/errors" -) - -/* -HTTP is a Client implementation that communicates with a Tendermint node over -JSON RPC and WebSockets. - -This is the main implementation you probably want to use in production code. -There are other implementations when calling the Tendermint node in-process -(Local), or when you want to mock out the server for test code (mock). - -Request batching is available for JSON RPC requests over HTTP, which conforms to -the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See -the example for more details. -*/ -type HTTP struct { - remote string - rpc *rpcclient.JSONRPCClient - - *baseRPCClient -} - -// BatchHTTP provides the same interface as `HTTP`, but allows for batching of -// requests (as per https://www.jsonrpc.org/specification#batch). Do not -// instantiate directly - rather use the HTTP.NewBatch() method to create an -// instance of this struct. -// -// Batching of HTTP requests is thread-safe in the sense that multiple -// goroutines can each create their own batches and send them using the same -// HTTP client. Multiple goroutines could also enqueue transactions in a single -// batch, but ordering of transactions in the batch cannot be guaranteed in such -// an example. -type BatchHTTP struct { - rpcBatch *rpcclient.JSONRPCRequestBatch - *baseRPCClient -} - -// rpcClient is an internal interface to which our RPC clients (batch and -// non-batch) must conform. Acts as an additional code-level sanity check to -// make sure the implementations stay coherent. -type rpcClient interface { - ABCIClient - HistoryClient - NetworkClient - SignClient - StatusClient - MempoolClient -} - -// baseRPCClient implements the basic RPC method logic without the actual -// underlying RPC call functionality, which is provided by `caller`. -type baseRPCClient struct { - caller rpcclient.JSONRPCCaller -} - -var ( - _ rpcClient = (*HTTP)(nil) - _ rpcClient = (*BatchHTTP)(nil) - _ rpcClient = (*baseRPCClient)(nil) -) - -// ----------------------------------------------------------------------------- -// HTTP - -// NewHTTP takes a remote endpoint in the form ://: and -// the websocket path (which always seems to be "/websocket") -// The function panics if the provided remote is invalid. -func NewHTTP(remote, wsEndpoint string) *HTTP { - httpClient := rpcclient.DefaultHTTPClient(remote) - return NewHTTPWithClient(remote, wsEndpoint, httpClient) -} - -// NewHTTPWithClient allows for setting a custom http client. See NewHTTP -// The function panics if the provided client is nil or remote is invalid. -func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) *HTTP { - if client == nil { - panic("nil http.Client provided") - } - rc := rpcclient.NewJSONRPCClientWithHTTPClient(remote, client) - - return &HTTP{ - rpc: rc, - remote: remote, - baseRPCClient: &baseRPCClient{caller: rc}, - } -} - -var _ Client = (*HTTP)(nil) - -// NewBatch creates a new batch client for this HTTP client. 
-func (c *HTTP) NewBatch() *BatchHTTP { - rpcBatch := c.rpc.NewRequestBatch() - return &BatchHTTP{ - rpcBatch: rpcBatch, - baseRPCClient: &baseRPCClient{ - caller: rpcBatch, - }, - } -} - -// ----------------------------------------------------------------------------- -// BatchHTTP - -// Send is a convenience function for an HTTP batch that will trigger the -// compilation of the batched requests and send them off using the client as a -// single request. On success, this returns a list of the deserialized results -// from each request in the sent batch. -func (b *BatchHTTP) Send() ([]interface{}, error) { - return b.rpcBatch.Send() -} - -// Clear will empty out this batch of requests and return the number of requests -// that were cleared out. -func (b *BatchHTTP) Clear() int { - return b.rpcBatch.Clear() -} - -// Count returns the number of enqueued requests waiting to be sent. -func (b *BatchHTTP) Count() int { - return b.rpcBatch.Count() -} - -// ----------------------------------------------------------------------------- -// baseRPCClient - -func (c *baseRPCClient) Status() (*ctypes.ResultStatus, error) { - result := new(ctypes.ResultStatus) - _, err := c.caller.Call("status", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Status") - } - return result, nil -} - -func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - result := new(ctypes.ResultABCIInfo) - _, err := c.caller.Call("abci_info", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "ABCIInfo") - } - return result, nil -} - -func (c *baseRPCClient) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) -} - -func (c *baseRPCClient) ABCIQueryWithOptions(path string, data []byte, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - result := new(ctypes.ResultABCIQuery) - _, err := c.caller.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, - result) - if err != nil { - return nil, errors.Wrap(err, "ABCIQuery") - } - return result, nil -} - -func (c *baseRPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.caller.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) - if err != nil { - return nil, errors.Wrap(err, "broadcast_tx_commit") - } - return result, nil -} - -func (c *baseRPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_async", tx) -} - -func (c *baseRPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_sync", tx) -} - -func (c *baseRPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - result := new(ctypes.ResultBroadcastTx) - _, err := c.caller.Call(route, map[string]interface{}{"tx": tx}, result) - if err != nil { - return nil, errors.Wrap(err, route) - } - return result, nil -} - -func (c *baseRPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { - result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.caller.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) - if err != nil { - return nil, errors.Wrap(err, "unconfirmed_txs") - } - return result, nil -} - -func (c *baseRPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { - result := 
new(ctypes.ResultUnconfirmedTxs) - _, err := c.caller.Call("num_unconfirmed_txs", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "num_unconfirmed_txs") - } - return result, nil -} - -func (c *baseRPCClient) NetInfo() (*ctypes.ResultNetInfo, error) { - result := new(ctypes.ResultNetInfo) - _, err := c.caller.Call("net_info", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "NetInfo") - } - return result, nil -} - -func (c *baseRPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - result := new(ctypes.ResultDumpConsensusState) - _, err := c.caller.Call("dump_consensus_state", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "DumpConsensusState") - } - return result, nil -} - -func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) { - result := new(ctypes.ResultConsensusState) - _, err := c.caller.Call("consensus_state", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "ConsensusState") - } - return result, nil -} - -func (c *baseRPCClient) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { - result := new(ctypes.ResultConsensusParams) - - if _, err := c.caller.Call( - "consensus_params", - map[string]interface{}{ - "height": height, - }, - result, - ); err != nil { - return nil, errors.Wrap(err, "ConsensusParams") - } - - return result, nil -} - -func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) { - result := new(ctypes.ResultHealth) - _, err := c.caller.Call("health", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Health") - } - return result, nil -} - -func (c *baseRPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - result := new(ctypes.ResultBlockchainInfo) - _, err := c.caller.Call("blockchain", - map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, - result) - if err != nil { - return nil, errors.Wrap(err, "BlockchainInfo") - } - return result, nil -} - -func (c *baseRPCClient) Genesis() (*ctypes.ResultGenesis, error) { - result := new(ctypes.ResultGenesis) - _, err := c.caller.Call("genesis", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Genesis") - } - return result, nil -} - -func (c *baseRPCClient) Block(height *int64) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) - _, err := c.caller.Call("block", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Block") - } - return result, nil -} - -func (c *baseRPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - result := new(ctypes.ResultBlockResults) - _, err := c.caller.Call("block_results", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Block Result") - } - return result, nil -} - -func (c *baseRPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) { - result := new(ctypes.ResultCommit) - _, err := c.caller.Call("commit", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Commit") - } - return result, nil -} - -func (c *baseRPCClient) Tx(hash []byte) (*ctypes.ResultTx, error) { - result := new(ctypes.ResultTx) - params := map[string]interface{}{ - "hash": hash, - } - _, err := c.caller.Call("tx", params, result) - if err != nil { - return nil, errors.Wrap(err, "Tx") - } - return result, 
nil -} - -func (c *baseRPCClient) Validators(height *int64) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) - params := map[string]interface{}{} - if height != nil { - params["height"] = height - } - _, err := c.caller.Call("validators", params, result) - if err != nil { - return nil, errors.Wrap(err, "Validators") - } - return result, nil -} diff --git a/tm2/pkg/bft/rpc/client/interface.go b/tm2/pkg/bft/rpc/client/interface.go deleted file mode 100644 index a8f42ddc955..00000000000 --- a/tm2/pkg/bft/rpc/client/interface.go +++ /dev/null @@ -1,100 +0,0 @@ -package client - -/* -The client package provides a general purpose interface (Client) for connecting -to a tendermint node, as well as higher-level functionality. - -The main implementation for production code is client.HTTP, which -connects via http to the jsonrpc interface of the tendermint node. - -For connecting to a node running in the same process (eg. when -compiling the abci app in the same process), you can use the client.Local -implementation. - -For mocking out server responses during testing to see behavior for -arbitrary return values, use the mock package. - -In addition to the Client interface, which should be used externally -for maximum flexibility and testability, and two implementations, -this package also provides helper functions that work on any Client -implementation. -*/ - -import ( - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Client wraps most important rpc calls a client would make. -// -// NOTE: Events cannot be subscribed to from the RPC APIs. For events -// subscriptions and filters and queries, an external API must be used that -// first synchronously consumes the events from the node's synchronous event -// switch, or reads logged events from the filesystem. -type Client interface { - ABCIClient - HistoryClient - NetworkClient - SignClient - StatusClient - MempoolClient - TxClient -} - -// ABCIClient groups together the functionality that principally affects the -// ABCI app. -// -// In many cases this will be all we want, so we can accept an interface which -// is easier to mock. -type ABCIClient interface { - // Reading from abci app - ABCIInfo() (*ctypes.ResultABCIInfo, error) - ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) - ABCIQueryWithOptions(path string, data []byte, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) - - // Writing to abci app - BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) -} - -// SignClient groups together the functionality needed to get valid signatures -// and prove anything about the chain. -type SignClient interface { - Block(height *int64) (*ctypes.ResultBlock, error) - BlockResults(height *int64) (*ctypes.ResultBlockResults, error) - Commit(height *int64) (*ctypes.ResultCommit, error) - Validators(height *int64) (*ctypes.ResultValidators, error) -} - -// HistoryClient provides access to data from genesis to now in large chunks. -type HistoryClient interface { - Genesis() (*ctypes.ResultGenesis, error) - BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) -} - -// StatusClient provides access to general chain info. -type StatusClient interface { - Status() (*ctypes.ResultStatus, error) -} - -// NetworkClient is general info about the network state. 
May not be needed -// usually. -type NetworkClient interface { - NetInfo() (*ctypes.ResultNetInfo, error) - DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) - ConsensusState() (*ctypes.ResultConsensusState, error) - ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) - Health() (*ctypes.ResultHealth, error) -} - -// MempoolClient shows us data about current mempool state. -type MempoolClient interface { - UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) - NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) -} - -type TxClient interface { - Tx(hash []byte) (*ctypes.ResultTx, error) -} diff --git a/tm2/pkg/bft/rpc/client/localclient.go b/tm2/pkg/bft/rpc/client/local.go similarity index 100% rename from tm2/pkg/bft/rpc/client/localclient.go rename to tm2/pkg/bft/rpc/client/local.go diff --git a/tm2/pkg/bft/rpc/client/main_test.go b/tm2/pkg/bft/rpc/client/main_test.go deleted file mode 100644 index 759104a3029..00000000000 --- a/tm2/pkg/bft/rpc/client/main_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore" - nm "github.com/gnolang/gno/tm2/pkg/bft/node" - rpctest "github.com/gnolang/gno/tm2/pkg/bft/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and kvstore) in the background to test against - dir, err := os.MkdirTemp("/tmp", "rpc-client-test") - if err != nil { - panic(err) - } - app := kvstore.NewPersistentKVStoreApplication(dir) - node = rpctest.StartTendermint(app) - - code := m.Run() - - // and shut down proper at the end - rpctest.StopTendermint(node) - os.Exit(code) -} diff --git a/tm2/pkg/bft/rpc/client/mock/abci.go b/tm2/pkg/bft/rpc/client/mock/abci.go deleted file mode 100644 index af09fa6c43a..00000000000 --- a/tm2/pkg/bft/rpc/client/mock/abci.go +++ /dev/null @@ -1,209 +0,0 @@ -package mock - -import ( - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// ABCIApp will send all abci related request to the named app, -// so you can test app behavior from a client without needing -// an entire tendermint node -type ABCIApp struct { - App abci.Application -} - -var ( - _ client.ABCIClient = ABCIApp{} - _ client.ABCIClient = ABCIMock{} - _ client.ABCIClient = (*ABCIRecorder)(nil) -) - -func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{Response: a.App.Info(abci.RequestInfo{})}, nil -} - -func (a ABCIApp) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (a ABCIApp) ABCIQueryWithOptions(path string, data []byte, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{ - Data: data, - Path: path, - Height: opts.Height, - Prove: opts.Prove, - }) - return &ctypes.ResultABCIQuery{Response: q}, nil -} - -// NOTE: Caller should call a.App.Commit() separately, -// this function does not actually wait for a commit. -// TODO: Make it wait for a commit and set res.Height appropriately. 
-func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res := ctypes.ResultBroadcastTxCommit{} - res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) - if res.CheckTx.IsErr() { - return &res, nil - } - res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) - res.Height = -1 // TODO - return &res, nil -} - -func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) - // and this gets written in a background thread... - if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() //nolint: errcheck - } - return &ctypes.ResultBroadcastTx{Error: c.Error, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil -} - -func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) - // and this gets written in a background thread... - if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() //nolint: errcheck - } - return &ctypes.ResultBroadcastTx{Error: c.Error, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil -} - -// ABCIMock will send all abci related request to the named app, -// so you can test app behavior from a client without needing -// an entire tendermint node -type ABCIMock struct { - Info Call - Query Call - BroadcastCommit Call - Broadcast Call -} - -func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - res, err := m.Info.GetResponse(nil) - if err != nil { - return nil, err - } - return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil -} - -func (m ABCIMock) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (m ABCIMock) ABCIQueryWithOptions(path string, data []byte, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) - if err != nil { - return nil, err - } - resQuery := res.(abci.ResponseQuery) - return &ctypes.ResultABCIQuery{Response: resQuery}, nil -} - -func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res, err := m.BroadcastCommit.GetResponse(tx) - if err != nil { - return nil, err - } - return res.(*ctypes.ResultBroadcastTxCommit), nil -} - -func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := m.Broadcast.GetResponse(tx) - if err != nil { - return nil, err - } - return res.(*ctypes.ResultBroadcastTx), nil -} - -func (m ABCIMock) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := m.Broadcast.GetResponse(tx) - if err != nil { - return nil, err - } - return res.(*ctypes.ResultBroadcastTx), nil -} - -// ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client) -// and record all ABCI related calls. 
-type ABCIRecorder struct { - Client client.ABCIClient - Calls []Call -} - -func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { - return &ABCIRecorder{ - Client: client, - Calls: []Call{}, - } -} - -type QueryArgs struct { - Path string - Data []byte - Height int64 - Prove bool -} - -func (r *ABCIRecorder) addCall(call Call) { - r.Calls = append(r.Calls, call) -} - -func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - res, err := r.Client.ABCIInfo() - r.addCall(Call{ - Name: "abci_info", - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data []byte, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := r.Client.ABCIQueryWithOptions(path, data, opts) - r.addCall(Call{ - Name: "abci_query", - Args: QueryArgs{path, data, opts.Height, opts.Prove}, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res, err := r.Client.BroadcastTxCommit(tx) - r.addCall(Call{ - Name: "broadcast_tx_commit", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxAsync(tx) - r.addCall(Call{ - Name: "broadcast_tx_async", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxSync(tx) - r.addCall(Call{ - Name: "broadcast_tx_sync", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} diff --git a/tm2/pkg/bft/rpc/client/mock/abci_test.go b/tm2/pkg/bft/rpc/client/mock/abci_test.go deleted file mode 100644 index 08019807f33..00000000000 --- a/tm2/pkg/bft/rpc/client/mock/abci_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package mock_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore" - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client/mock" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/errors" -) - -func TestABCIMock(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - key, value := []byte("foo"), []byte("bar") - height := int64(10) - goodTx := types.Tx{0x01, 0xff} - badTx := types.Tx{0x12, 0x21} - - m := mock.ABCIMock{ - Info: mock.Call{Error: errors.New("foobar")}, - Query: mock.Call{Response: abci.ResponseQuery{ - Key: key, - Value: value, - Height: height, - }}, - // Broadcast commit depends on call - BroadcastCommit: mock.Call{ - Args: goodTx, - Response: &ctypes.ResultBroadcastTxCommit{ - CheckTx: abci.ResponseCheckTx{ResponseBase: abci.ResponseBase{Data: []byte("stand")}}, - DeliverTx: abci.ResponseDeliverTx{ResponseBase: abci.ResponseBase{Data: []byte("deliver")}}, - }, - Error: errors.New("bad tx"), - }, - Broadcast: mock.Call{Error: errors.New("must commit")}, - } - - // now, let's try to make some calls - _, err := m.ABCIInfo() - require.NotNil(err) - assert.Equal("foobar", err.Error()) - - 
// query always returns the response - _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Prove: false}) - query := _query.Response - require.Nil(err) - require.NotNil(query) - assert.EqualValues(key, query.Key) - assert.EqualValues(value, query.Value) - assert.Equal(height, query.Height) - - // non-commit calls always return errors - _, err = m.BroadcastTxSync(goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - _, err = m.BroadcastTxAsync(goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - - // commit depends on the input - _, err = m.BroadcastTxCommit(badTx) - require.NotNil(err) - assert.Equal("bad tx", err.Error()) - bres, err := m.BroadcastTxCommit(goodTx) - require.Nil(err, "%+v", err) - assert.Nil(bres.CheckTx.Error) - assert.EqualValues("stand", string(bres.CheckTx.Data)) - assert.EqualValues("deliver", string(bres.DeliverTx.Data)) -} - -func TestABCIRecorder(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // This mock returns errors on everything but Query - m := mock.ABCIMock{ - Info: mock.Call{Response: abci.ResponseInfo{ - ResponseBase: abci.ResponseBase{ - Data: []byte("data"), - }, - ABCIVersion: "v0.0.0test", - AppVersion: "v0.0.0test", - }}, - Query: mock.Call{Error: errors.New("query")}, - Broadcast: mock.Call{Error: errors.New("broadcast")}, - BroadcastCommit: mock.Call{Error: errors.New("broadcast_commit")}, - } - r := mock.NewABCIRecorder(m) - - require.Equal(0, len(r.Calls)) - - _, err := r.ABCIInfo() - assert.Nil(err, "expected no err on info") - - _, err = r.ABCIQueryWithOptions("path", []byte("data"), client.ABCIQueryOptions{Prove: false}) - assert.NotNil(err, "expected error on query") - require.Equal(2, len(r.Calls)) - - info := r.Calls[0] - assert.Equal("abci_info", info.Name) - assert.Nil(info.Error) - assert.Nil(info.Args) - require.NotNil(info.Response) - ir, ok := info.Response.(*ctypes.ResultABCIInfo) - require.True(ok) - assert.Equal("data", string(ir.Response.Data)) - assert.Equal("v0.0.0test", ir.Response.ABCIVersion) - assert.Equal("v0.0.0test", ir.Response.AppVersion) - - query := r.Calls[1] - assert.Equal("abci_query", query.Name) - assert.Nil(query.Response) - require.NotNil(query.Error) - assert.Equal("query", query.Error.Error()) - require.NotNil(query.Args) - qa, ok := query.Args.(mock.QueryArgs) - require.True(ok) - assert.Equal("path", qa.Path) - assert.EqualValues("data", string(qa.Data)) - assert.False(qa.Prove) - - // now add some broadcasts (should all err) - txs := []types.Tx{{1}, {2}, {3}} - _, err = r.BroadcastTxCommit(txs[0]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxSync(txs[1]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxAsync(txs[2]) - assert.NotNil(err, "expected err on broadcast") - - require.Equal(5, len(r.Calls)) - - bc := r.Calls[2] - assert.Equal("broadcast_tx_commit", bc.Name) - assert.Nil(bc.Response) - require.NotNil(bc.Error) - assert.EqualValues(bc.Args, txs[0]) - - bs := r.Calls[3] - assert.Equal("broadcast_tx_sync", bs.Name) - assert.Nil(bs.Response) - require.NotNil(bs.Error) - assert.EqualValues(bs.Args, txs[1]) - - ba := r.Calls[4] - assert.Equal("broadcast_tx_async", ba.Name) - assert.Nil(ba.Response) - require.NotNil(ba.Error) - assert.EqualValues(ba.Args, txs[2]) -} - -func TestABCIApp(t *testing.T) { - assert, require := assert.New(t), require.New(t) - app := kvstore.NewKVStoreApplication() - m := mock.ABCIApp{app} - - // get some info - info, err := m.ABCIInfo() - 
require.Nil(err) - assert.Equal(`{"size":0}`, string(info.Response.Data)) - - // add a key - key, value := "foo", "bar" - tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(types.Tx(tx)) - require.Nil(err) - assert.True(res.CheckTx.IsOK()) - require.NotNil(res.DeliverTx) - assert.True(res.DeliverTx.IsOK()) - - // commit - // TODO: This may not be necessary in the future - if res.Height == -1 { - m.App.Commit() - } - - // check the key - _qres, err := m.ABCIQueryWithOptions("/key", []byte(key), client.ABCIQueryOptions{Prove: true}) - qres := _qres.Response - require.Nil(err) - assert.EqualValues(value, qres.Value) - - // XXX Check proof -} diff --git a/tm2/pkg/bft/rpc/client/mock/client.go b/tm2/pkg/bft/rpc/client/mock/client.go deleted file mode 100644 index 5dc048fa5ff..00000000000 --- a/tm2/pkg/bft/rpc/client/mock/client.go +++ /dev/null @@ -1,153 +0,0 @@ -package mock - -/* -package mock returns a Client implementation that -accepts various (mock) implementations of the various methods. - -This implementation is useful for using in tests, when you don't -need a real server, but want a high-level of control about -the server response you want to mock (eg. error handling), -or if you just want to record the calls to verify in your tests. - -For real clients, you probably want the "http" package. If you -want to directly call a tendermint node in process, you can use the -"local" package. -*/ - -import ( - "reflect" - - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/core" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/service" -) - -// Client wraps arbitrary implementations of the various interfaces. -// -// We provide a few choices to mock out each one in this package. -// Nothing hidden here, so no New function, just construct it from -// some parts, and swap them out them during the tests. -type Client struct { - client.ABCIClient - client.SignClient - client.HistoryClient - client.StatusClient - client.MempoolClient - client.TxClient - service.Service -} - -var _ client.Client = Client{} - -// Call is used by recorders to save a call and response. -// It can also be used to configure mock responses. -type Call struct { - Name string - Args interface{} - Response interface{} - Error error -} - -// GetResponse will generate the appropriate response for us, when -// using the Call struct to configure a Mock handler. -// -// When configuring a response, if only one of Response or Error is -// set then that will always be returned. If both are set, then -// we return Response if the Args match the set args, Error otherwise. -func (c Call) GetResponse(args interface{}) (interface{}, error) { - // handle the case with no response - if c.Response == nil { - if c.Error == nil { - panic("Misconfigured call, you must set either Response or Error") - } - return nil, c.Error - } - // response without error - if c.Error == nil { - return c.Response, nil - } - // have both, we must check args.... 
- if reflect.DeepEqual(args, c.Args) { - return c.Response, nil - } - return nil, c.Error -} - -func (c Client) Status() (*ctypes.ResultStatus, error) { - return core.Status(&rpctypes.Context{}) -} - -func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo(&rpctypes.Context{}) -} - -func (c Client) ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (c Client) ABCIQueryWithOptions(path string, data []byte, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) -} - -func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(&rpctypes.Context{}, tx) -} - -func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(&rpctypes.Context{}, tx) -} - -func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(&rpctypes.Context{}, tx) -} - -func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { - return core.NetInfo(&rpctypes.Context{}) -} - -func (c Client) ConsensusState() (*ctypes.ResultConsensusState, error) { - return core.ConsensusState(&rpctypes.Context{}) -} - -func (c Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { - return core.ConsensusParams(&rpctypes.Context{}, height) -} - -func (c Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState(&rpctypes.Context{}) -} - -func (c Client) Health() (*ctypes.ResultHealth, error) { - return core.Health(&rpctypes.Context{}) -} - -func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent) -} - -func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) -} - -func (c Client) Genesis() (*ctypes.ResultGenesis, error) { - return core.Genesis(&rpctypes.Context{}) -} - -func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { - return core.Block(&rpctypes.Context{}, height) -} - -func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(&rpctypes.Context{}, height) -} - -func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(&rpctypes.Context{}, height) -} diff --git a/tm2/pkg/bft/rpc/client/mock/status.go b/tm2/pkg/bft/rpc/client/mock/status.go deleted file mode 100644 index e5a1d84209b..00000000000 --- a/tm2/pkg/bft/rpc/client/mock/status.go +++ /dev/null @@ -1,52 +0,0 @@ -package mock - -import ( - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" -) - -// StatusMock returns the result specified by the Call -type StatusMock struct { - Call -} - -var ( - _ client.StatusClient = (*StatusMock)(nil) - _ client.StatusClient = (*StatusRecorder)(nil) -) - -func (m *StatusMock) Status() (*ctypes.ResultStatus, error) { - res, err := m.GetResponse(nil) - if err != nil { - return nil, err - } - return res.(*ctypes.ResultStatus), nil -} - -// StatusRecorder can wrap another type (StatusMock, 
full client) -// and record the status calls -type StatusRecorder struct { - Client client.StatusClient - Calls []Call -} - -func NewStatusRecorder(client client.StatusClient) *StatusRecorder { - return &StatusRecorder{ - Client: client, - Calls: []Call{}, - } -} - -func (r *StatusRecorder) addCall(call Call) { - r.Calls = append(r.Calls, call) -} - -func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) { - res, err := r.Client.Status() - r.addCall(Call{ - Name: "status", - Response: res, - Error: err, - }) - return res, err -} diff --git a/tm2/pkg/bft/rpc/client/mock/status_test.go b/tm2/pkg/bft/rpc/client/mock/status_test.go deleted file mode 100644 index ad2f998eed7..00000000000 --- a/tm2/pkg/bft/rpc/client/mock/status_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package mock_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client/mock" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" -) - -func TestStatus(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - m := &mock.StatusMock{ - Call: mock.Call{ - Response: &ctypes.ResultStatus{ - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: []byte("block"), - LatestAppHash: []byte("app"), - LatestBlockHeight: 10, - }, - }, - }, - } - - r := mock.NewStatusRecorder(m) - require.Equal(0, len(r.Calls)) - - // make sure response works proper - status, err := r.Status() - require.Nil(err, "%+v", err) - assert.EqualValues("block", status.SyncInfo.LatestBlockHash) - assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) - - // make sure recorder works properly - require.Equal(1, len(r.Calls)) - rs := r.Calls[0] - assert.Equal("status", rs.Name) - assert.Nil(rs.Args) - assert.Nil(rs.Error) - require.NotNil(rs.Response) - st, ok := rs.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.EqualValues("block", st.SyncInfo.LatestBlockHash) - assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) -} diff --git a/tm2/pkg/bft/rpc/client/mock_test.go b/tm2/pkg/bft/rpc/client/mock_test.go new file mode 100644 index 00000000000..bc2d92367bc --- /dev/null +++ b/tm2/pkg/bft/rpc/client/mock_test.go @@ -0,0 +1,43 @@ +package client + +import ( + "context" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" +) + +type ( + sendRequestDelegate func(context.Context, types.RPCRequest) (*types.RPCResponse, error) + sendBatchDelegate func(context.Context, types.RPCRequests) (types.RPCResponses, error) + closeDelegate func() error +) + +type mockClient struct { + sendRequestFn sendRequestDelegate + sendBatchFn sendBatchDelegate + closeFn closeDelegate +} + +func (m *mockClient) SendRequest(ctx context.Context, request types.RPCRequest) (*types.RPCResponse, error) { + if m.sendRequestFn != nil { + return m.sendRequestFn(ctx, request) + } + + return nil, nil +} + +func (m *mockClient) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + if m.sendBatchFn != nil { + return m.sendBatchFn(ctx, requests) + } + + return nil, nil +} + +func (m *mockClient) Close() error { + if m.closeFn != nil { + return m.closeFn() + } + + return nil +} diff --git a/tm2/pkg/bft/rpc/client/options.go b/tm2/pkg/bft/rpc/client/options.go new file mode 100644 index 00000000000..e4b0a1a89d2 --- /dev/null +++ b/tm2/pkg/bft/rpc/client/options.go @@ -0,0 +1,12 @@ +package client + +import "time" + +type Option func(client *RPCClient) + +// WithRequestTimeout sets the request timeout +func WithRequestTimeout(timeout 
time.Duration) Option { + return func(client *RPCClient) { + client.requestTimeout = timeout + } +} diff --git a/tm2/pkg/bft/rpc/client/rpc_test.go b/tm2/pkg/bft/rpc/client/rpc_test.go deleted file mode 100644 index 49640a394fb..00000000000 --- a/tm2/pkg/bft/rpc/client/rpc_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package client_test - -import ( - "net/http" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" - rpctest "github.com/gnolang/gno/tm2/pkg/bft/rpc/test" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -func getHTTPClient() *client.HTTP { - cfg, _ := rpctest.GetConfig() - rpcAddr := cfg.RPC.ListenAddress - return client.NewHTTP(rpcAddr, "/websocket") -} - -func getLocalClient() *client.Local { - return client.NewLocal() -} - -// GetClients returns a slice of clients for table-driven tests -func GetClients() []client.Client { - return []client.Client{ - getHTTPClient(), - getLocalClient(), - } -} - -func TestNilCustomHTTPClient(t *testing.T) { - t.Parallel() - - require.Panics(t, func() { - client.NewHTTPWithClient("http://example.com", "/websocket", nil) - }) - require.Panics(t, func() { - rpcclient.NewJSONRPCClientWithHTTPClient("http://example.com", nil) - }) -} - -func TestCustomHTTPClient(t *testing.T) { - t.Parallel() - - cfg, _ := rpctest.GetConfig() - remote := cfg.RPC.ListenAddress - c := client.NewHTTPWithClient(remote, "/websocket", http.DefaultClient) - status, err := c.Status() - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - t.Parallel() - - cfg, _ := rpctest.GetConfig() - origin := cfg.RPC.CORSAllowedOrigins[0] - remote := strings.Replace(cfg.RPC.ListenAddress, "tcp", "http", -1) - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - cfg, _ := rpctest.GetConfig() - moniker := cfg.Moniker - status, err := c.Status() - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } -} - -// Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo() - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(string(info.Response.ResponseBase.Data), "size")) - } -} - -func TestNetInfo(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo() - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState() - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} - -func TestConsensusState(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState() - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health() - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - t.Parallel() - - for i, c := range GetClients() { - // make sure this is the right genesis file - gen, err := c.Genesis() - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - vals, err := c.Validators(nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestABCIQuery(t *testing.T) { - for i, c := range GetClients() { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - client.WaitForHeight(c, apph, nil) - res, err := c.ABCIQuery("/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } -} - -// Make some app checks -func TestAppCalls(t *testing.T) { - t.Parallel() - - assert, require := assert.New(t), require.New(t) - for i, c := range GetClients() { - // get an offset of height to avoid racing and guessing - s, err := c.Status() - require.Nil(err, "%d: %+v", i, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 2 - _, err = c.Block(&h) - assert.NotNil(err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(err, "%d: %+v", i, err) - require.True(bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 1 // this is where the tx will be applied to the state - - // wait before querying - if err := client.WaitForHeight(c, apph, nil); err != nil { - t.Error(err) - } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false}) - qres := 
_qres.Response - if assert.Nil(err) && assert.True(qres.IsOK()) { - assert.Equal(k, qres.Key) - assert.EqualValues(v, qres.Value) - } - - /* - // make sure we can lookup the tx with proof - ptx, err := c.Tx(bres.Hash, true) - require.Nil(err, "%d: %+v", i, err) - assert.EqualValues(txh, ptx.Height) - assert.EqualValues(tx, ptx.Tx) - */ - - // and we can even check the block is added - block, err := c.Block(&apph) - require.Nil(err, "%d: %+v", i, err) - appHash := block.BlockMeta.Header.AppHash - assert.True(len(appHash) > 0) - assert.EqualValues(apph, block.BlockMeta.Header.Height) - - // now check the results - blockResults, err := c.BlockResults(&txh) - require.Nil(err, "%d: %+v", i, err) - assert.Equal(txh, blockResults.Height) - if assert.Equal(1, len(blockResults.Results.DeliverTxs)) { - // check success code - assert.Nil(blockResults.Results.DeliverTxs[0].Error) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(apph, apph) - require.Nil(err, "%d: %+v", i, err) - assert.True(info.LastHeight >= apph) - if assert.Equal(1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(apph, lastMeta.Header.Height) - bMeta := block.BlockMeta - assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(bMeta.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(&apph) - require.Nil(err, "%d: %+v", i, err) - cappHash := commit.Header.AppHash - assert.Equal(appHash, cappHash) - assert.NotNil(commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(&h) - require.Nil(err, "%d: %+v", i, err) - assert.Equal(block.Block.LastCommit, commit2.Commit) - - // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true}) - pres := _pres.Response - assert.Nil(err) - assert.True(pres.IsOK()) - - // XXX Test proof - } -} - -func TestBroadcastTxSync(t *testing.T) { - t.Parallel() - - require := require.New(t) - - // TODO (melekes): use mempool which is set on RPC rather than getting it from node - mempool := node.Mempool() - initMempoolSize := mempool.Size() - - for i, c := range GetClients() { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(tx) - require.Nil(err, "%d: %+v", i, err) - require.Nil(bres.Error) - - require.Equal(initMempoolSize+1, mempool.Size()) - - txs := mempool.ReapMaxTxs(len(tx)) - require.EqualValues(tx, txs[0]) - mempool.Flush() - } -} - -func TestBroadcastTxCommit(t *testing.T) { - require := require.New(t) - - mempool := node.Mempool() - for i, c := range GetClients() { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(err, "%d: %+v", i, err) - require.True(bres.CheckTx.IsOK()) - require.True(bres.DeliverTx.IsOK()) - - require.Equal(0, mempool.Size()) - } -} - -func TestUnconfirmedTxs(t *testing.T) { - _, _, tx := MakeTxKV() - - mempool := node.Mempool() - _ = mempool.CheckTx(tx, nil) - - for i, c := range GetClients() { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.UnconfirmedTxs(1) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, mempool.TxsBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - mempool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - _, _, tx := MakeTxKV() - - mempool := node.Mempool() - _ = mempool.CheckTx(tx, nil) - mempoolSize := mempool.Size() - - for i, c := range GetClients() { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs() - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, mempool.TxsBytes(), res.TotalBytes) - } - - mempool.Flush() -} - -func TestTx(t *testing.T) { - // Prepare the transaction - // by broadcasting it to the chain - c := getHTTPClient() - _, _, tx := MakeTxKV() - - response, err := c.BroadcastTxCommit(tx) - require.NoError(t, err) - require.NotNil(t, response) - - var ( - txHeight = response.Height - txHash = response.Hash - ) - - cases := []struct { - name string - valid bool - hash []byte - }{ - { - "tx result found", - true, - txHash, - }, - { - "tx result not found", - false, - types.Tx("a different tx").Hash(), - }, - } - - for _, c := range GetClients() { - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - // now we query for the tx. - // since there's only one tx, we know index=0. 
- ptx, err := c.Tx(tc.hash) - - if !tc.valid { - require.Error(t, err) - - return - } - - require.NoError(t, err) - - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - }) - } - } -} - -func TestBatchedJSONRPCCalls(t *testing.T) { - c := getHTTPClient() - testBatchedJSONRPCCalls(t, c) -} - -func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) { - t.Helper() - - k1, v1, tx1 := MakeTxKV() - k2, v2, tx2 := MakeTxKV() - - batch := c.NewBatch() - r1, err := batch.BroadcastTxCommit(tx1) - require.NoError(t, err) - r2, err := batch.BroadcastTxCommit(tx2) - require.NoError(t, err) - require.Equal(t, 2, batch.Count()) - bresults, err := batch.Send() - require.NoError(t, err) - require.Len(t, bresults, 2) - require.Equal(t, 0, batch.Count()) - - bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit) - require.True(t, ok) - require.Equal(t, *bresult1, *r1) - bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit) - require.True(t, ok) - require.Equal(t, *bresult2, *r2) - apph := max(bresult1.Height, bresult2.Height) + 1 - - client.WaitForHeight(c, apph, nil) - - q1, err := batch.ABCIQuery("/key", k1) - require.NoError(t, err) - q2, err := batch.ABCIQuery("/key", k2) - require.NoError(t, err) - require.Equal(t, 2, batch.Count()) - qresults, err := batch.Send() - require.NoError(t, err) - require.Len(t, qresults, 2) - require.Equal(t, 0, batch.Count()) - - qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery) - require.True(t, ok) - require.Equal(t, *qresult1, *q1) - qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery) - require.True(t, ok) - require.Equal(t, *qresult2, *q2) - - require.Equal(t, qresult1.Response.Key, k1) - require.Equal(t, qresult2.Response.Key, k2) - require.Equal(t, qresult1.Response.Value, v1) - require.Equal(t, qresult2.Response.Value, v2) -} - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - t.Parallel() - - c := getHTTPClient() - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) { - t.Parallel() - - c := getHTTPClient() - batch := c.NewBatch() - _, err := batch.Send() - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) { - t.Parallel() - - c := getHTTPClient() - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func TestConcurrentJSONRPCBatching(t *testing.T) { - var wg sync.WaitGroup - c := getHTTPClient() - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - testBatchedJSONRPCCalls(t, c) - }() - } - wg.Wait() -} diff --git a/tm2/pkg/bft/rpc/client/types.go b/tm2/pkg/bft/rpc/client/types.go index 6a23fa4509d..52427a1a818 100644 --- a/tm2/pkg/bft/rpc/client/types.go +++ b/tm2/pkg/bft/rpc/client/types.go @@ -1,5 +1,10 @@ package client +import ( + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + 
"github.com/gnolang/gno/tm2/pkg/bft/types" +) + // ABCIQueryOptions can be used to provide options for ABCIQuery call other // than the DefaultABCIQueryOptions. type ABCIQueryOptions struct { @@ -9,3 +14,77 @@ type ABCIQueryOptions struct { // DefaultABCIQueryOptions are latest height (0) and prove false. var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} + +// Client wraps most important rpc calls a client would make. +// +// NOTE: Events cannot be subscribed to from the RPC APIs. For events +// subscriptions and filters and queries, an external API must be used that +// first synchronously consumes the events from the node's synchronous event +// switch, or reads logged events from the filesystem. +type Client interface { + ABCIClient + HistoryClient + NetworkClient + SignClient + StatusClient + MempoolClient + TxClient +} + +// ABCIClient groups together the functionality that principally affects the +// ABCI app. +// +// In many cases this will be all we want, so we can accept an interface which +// is easier to mock. +type ABCIClient interface { + // Reading from abci app + ABCIInfo() (*ctypes.ResultABCIInfo, error) + ABCIQuery(path string, data []byte) (*ctypes.ResultABCIQuery, error) + ABCIQueryWithOptions(path string, data []byte, + opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) + + // Writing to abci app + BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) +} + +// SignClient groups together the functionality needed to get valid signatures +// and prove anything about the chain. +type SignClient interface { + Block(height *int64) (*ctypes.ResultBlock, error) + BlockResults(height *int64) (*ctypes.ResultBlockResults, error) + Commit(height *int64) (*ctypes.ResultCommit, error) + Validators(height *int64) (*ctypes.ResultValidators, error) +} + +// HistoryClient provides access to data from genesis to now in large chunks. +type HistoryClient interface { + Genesis() (*ctypes.ResultGenesis, error) + BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) +} + +// StatusClient provides access to general chain info. +type StatusClient interface { + Status() (*ctypes.ResultStatus, error) +} + +// NetworkClient is general info about the network state. May not be needed +// usually. +type NetworkClient interface { + NetInfo() (*ctypes.ResultNetInfo, error) + DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) + ConsensusState() (*ctypes.ResultConsensusState, error) + ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) + Health() (*ctypes.ResultHealth, error) +} + +// MempoolClient shows us data about current mempool state. 
+type MempoolClient interface { + UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) +} + +type TxClient interface { + Tx(hash []byte) (*ctypes.ResultTx, error) +} diff --git a/tm2/pkg/bft/rpc/config/config.go b/tm2/pkg/bft/rpc/config/config.go index 76c490bf94c..1428861626c 100644 --- a/tm2/pkg/bft/rpc/config/config.go +++ b/tm2/pkg/bft/rpc/config/config.go @@ -163,3 +163,12 @@ func (cfg RPCConfig) CertFile() string { func (cfg RPCConfig) IsTLSEnabled() bool { return cfg.TLSCertFile != "" && cfg.TLSKeyFile != "" } + +// helper function to make config creation independent of root dir +func join(root, path string) string { + if filepath.IsAbs(path) { + return path + } + + return filepath.Join(root, path) +} diff --git a/tm2/pkg/bft/rpc/config/utils.go b/tm2/pkg/bft/rpc/config/utils.go deleted file mode 100644 index 5a6eec09e43..00000000000 --- a/tm2/pkg/bft/rpc/config/utils.go +++ /dev/null @@ -1,11 +0,0 @@ -package config - -import "path/filepath" - -// helper function to make config creation independent of root dir -func join(root, path string) string { - if filepath.IsAbs(path) { - return path - } - return filepath.Join(root, path) -} diff --git a/tm2/pkg/bft/rpc/lib/client/args_test.go b/tm2/pkg/bft/rpc/lib/client/args_test.go deleted file mode 100644 index 2a7e749f094..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/args_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package rpcclient - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type Tx []byte - -type Foo struct { - Bar int - Baz string -} - -func TestArgToJSON(t *testing.T) { - t.Parallel() - - assert := assert.New(t) - require := require.New(t) - - cases := []struct { - input interface{} - expected string - }{ - {[]byte("1234"), "0x31323334"}, - {Tx("654"), "0x363534"}, - {Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`}, - } - - for i, tc := range cases { - args := map[string]interface{}{"data": tc.input} - err := argsToJSON(args) - require.Nil(err, "%d: %+v", i, err) - require.Equal(1, len(args), "%d", i) - data, ok := args["data"].(string) - require.True(ok, "%d: %#v", i, args["data"]) - assert.Equal(tc.expected, data, "%d", i) - } -} diff --git a/tm2/pkg/bft/rpc/lib/client/batch/batch.go b/tm2/pkg/bft/rpc/lib/client/batch/batch.go new file mode 100644 index 00000000000..e507cd9408f --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/batch/batch.go @@ -0,0 +1,64 @@ +package batch + +import ( + "context" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" +) + +type Client interface { + SendBatch(context.Context, types.RPCRequests) (types.RPCResponses, error) +} + +// Batch allows us to buffer multiple request/response structures +// into a single batch request. 
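A side note on the `join` helper relocated into `rpc/config/config.go` above: absolute paths are kept as-is, while relative ones are resolved under the config root. A minimal sketch of that behavior (the helper is copied here so the snippet builds on its own; the paths are illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// join mirrors the relocated helper: absolute paths win,
// relative paths are anchored under the given root.
func join(root, path string) string {
	if filepath.IsAbs(path) {
		return path
	}

	return filepath.Join(root, path)
}

func main() {
	fmt.Println(join("/node", "rpc.cert"))      // /node/rpc.cert
	fmt.Println(join("/node", "/etc/rpc.cert")) // /etc/rpc.cert
}
```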
+// NOT thread safe +type Batch struct { + client Client + requests types.RPCRequests +} + +// NewBatch creates a new batch object +func NewBatch(client Client) *Batch { + return &Batch{ + client: client, + requests: make(types.RPCRequests, 0), + } +} + +// Count returns the number of enqueued requests waiting to be sent +func (b *Batch) Count() int { + return len(b.requests) +} + +// Clear empties out the request batch +func (b *Batch) Clear() int { + return b.clear() +} + +func (b *Batch) clear() int { + count := len(b.requests) + b.requests = make(types.RPCRequests, 0) + + return count +} + +// Send will attempt to send the current batch of enqueued requests, and then +// will clear out the requests once done +func (b *Batch) Send(ctx context.Context) (types.RPCResponses, error) { + defer func() { + b.clear() + }() + + responses, err := b.client.SendBatch(ctx, b.requests) + if err != nil { + return nil, err + } + + return responses, nil +} + +// AddRequest adds a new request onto the batch +func (b *Batch) AddRequest(request types.RPCRequest) { + b.requests = append(b.requests, request) +} diff --git a/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go b/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go new file mode 100644 index 00000000000..2ef01bb6360 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go @@ -0,0 +1,103 @@ +package batch + +import ( + "context" + "testing" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// generateRequests generates dummy RPC requests +func generateRequests(t *testing.T, count int) types.RPCRequests { + t.Helper() + + requests := make(types.RPCRequests, 0, count) + + for i := 0; i < count; i++ { + requests = append(requests, types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCIntID(i), + }) + } + + return requests +} + +func TestBatch_AddRequest(t *testing.T) { + t.Parallel() + + var ( + capturedSend types.RPCRequests + requests = generateRequests(t, 100) + + mockClient = &mockClient{ + sendBatchFn: func(_ context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + capturedSend = requests + + responses := make(types.RPCResponses, len(requests)) + + for index, request := range requests { + responses[index] = types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + } + } + + return responses, nil + }, + } + ) + + // Create the batch + b := NewBatch(mockClient) + + // Add the requests + for _, request := range requests { + b.AddRequest(request) + } + + // Make sure the count is correct + require.Equal(t, len(requests), b.Count()) + + // Send the requests + responses, err := b.Send(context.Background()) + require.NoError(t, err) + + // Make sure the correct requests were sent + assert.Equal(t, requests, capturedSend) + + // Make sure the correct responses were returned + require.Len(t, responses, len(requests)) + + for index, response := range responses { + assert.Equal(t, requests[index].ID, response.ID) + assert.Equal(t, requests[index].JSONRPC, response.JSONRPC) + assert.Nil(t, response.Result) + assert.Nil(t, response.Error) + } + + // Make sure the batch has been cleared after sending + assert.Equal(t, b.Count(), 0) +} + +func TestBatch_Clear(t *testing.T) { + t.Parallel() + + requests := generateRequests(t, 100) + + // Create the batch + b := NewBatch(nil) + + // Add the requests + for _, request := range requests { + b.AddRequest(request) + } + + // Clear the batch + require.EqualValues(t, len(requests), b.Clear()) + + // Make 
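Putting the new `Batch` type to use: requests are queued locally and flushed in a single round trip, after which the batch clears itself; per the comment above it is not safe for concurrent use. A minimal sketch, with an illustrative in-memory `batch.Client` standing in for a real transport:

```go
package main

import (
	"context"
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/batch"
	types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
)

// echoClient is an illustrative batch.Client that answers every
// request with an empty response carrying the same ID.
type echoClient struct{}

func (echoClient) SendBatch(_ context.Context, reqs types.RPCRequests) (types.RPCResponses, error) {
	resps := make(types.RPCResponses, len(reqs))
	for i, req := range reqs {
		resps[i] = types.RPCResponse{JSONRPC: "2.0", ID: req.ID}
	}

	return resps, nil
}

func main() {
	b := batch.NewBatch(echoClient{})

	// Queue up a couple of requests; nothing is sent yet.
	b.AddRequest(types.RPCRequest{JSONRPC: "2.0", ID: types.JSONRPCIntID(1)})
	b.AddRequest(types.RPCRequest{JSONRPC: "2.0", ID: types.JSONRPCIntID(2)})
	fmt.Println("queued:", b.Count()) // queued: 2

	// Send flushes the queue in one round trip and clears it afterwards.
	resps, err := b.Send(context.Background())
	fmt.Println(len(resps), err, b.Count()) // 2 <nil> 0
}
```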
sure the batch is cleared + require.Equal(t, b.Count(), 0) +} diff --git a/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go b/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go new file mode 100644 index 00000000000..5865631feab --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go @@ -0,0 +1,21 @@ +package batch + +import ( + "context" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" +) + +type sendBatchDelegate func(context.Context, types.RPCRequests) (types.RPCResponses, error) + +type mockClient struct { + sendBatchFn sendBatchDelegate +} + +func (m *mockClient) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + if m.sendBatchFn != nil { + return m.sendBatchFn(ctx, requests) + } + + return nil, nil +} diff --git a/tm2/pkg/bft/rpc/lib/client/client.go b/tm2/pkg/bft/rpc/lib/client/client.go new file mode 100644 index 00000000000..8fc78d9eb64 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/client.go @@ -0,0 +1,34 @@ +package rpcclient + +import ( + "context" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" +) + +// Client is the JSON-RPC client abstraction +type Client interface { + // SendRequest sends a single RPC request to the JSON-RPC layer + SendRequest(context.Context, types.RPCRequest) (*types.RPCResponse, error) + + // SendBatch sends a batch of RPC requests to the JSON-RPC layer + SendBatch(context.Context, types.RPCRequests) (types.RPCResponses, error) + + // Close closes the RPC client + Close() error +} + +// Batch is the JSON-RPC batch abstraction +type Batch interface { + // AddRequest adds a single request to the RPC batch + AddRequest(types.RPCRequest) + + // Send sends the batch to the RPC layer + Send(context.Context) (types.RPCResponses, error) + + // Clear clears out the batch + Clear() int + + // Count returns the number of enqueued requests + Count() int +} diff --git a/tm2/pkg/bft/rpc/lib/client/http/client.go b/tm2/pkg/bft/rpc/lib/client/http/client.go new file mode 100644 index 00000000000..34d301deba2 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/http/client.go @@ -0,0 +1,245 @@ +package http + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" +) + +const ( + protoHTTP = "http" + protoHTTPS = "https" + protoWSS = "wss" + protoWS = "ws" + protoTCP = "tcp" +) + +var ( + ErrRequestResponseIDMismatch = errors.New("http request / response ID mismatch") + ErrInvalidBatchResponse = errors.New("invalid http batch response size") +) + +// Client is an HTTP client implementation +type Client struct { + rpcURL string // the remote RPC URL of the node + + client *http.Client +} + +// NewClient initializes and creates a new HTTP RPC client +func NewClient(rpcURL string) (*Client, error) { + // Parse the RPC URL + address, err := toClientAddress(rpcURL) + if err != nil { + return nil, fmt.Errorf("invalid RPC URL, %w", err) + } + + c := &Client{ + rpcURL: address, + client: defaultHTTPClient(rpcURL), + } + + return c, nil +} + +// SendRequest sends a single RPC request to the server +func (c *Client) SendRequest(ctx context.Context, request types.RPCRequest) (*types.RPCResponse, error) { + // Send the request + response, err := sendRequestCommon[types.RPCRequest, *types.RPCResponse](ctx, c.client, c.rpcURL, request) + if err != nil { + return nil, err + } + + // Make sure the ID matches + if response.ID != response.ID { + return nil, ErrRequestResponseIDMismatch + } + + return response, nil 
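Both transports are meant to plug into the `rpcclient.Client` abstraction defined above. A compile-time sketch of that relationship; the HTTP methods are shown in this diff, while the websocket client is assumed to expose the same set, as its tests further below suggest:

```go
package main

import (
	rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client"
	rpchttp "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/http"
	rpcws "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/ws"
)

// Compile-time assertions that both transports satisfy the shared
// JSON-RPC client abstraction.
var (
	_ rpcclient.Client = (*rpchttp.Client)(nil)
	_ rpcclient.Client = (*rpcws.Client)(nil)
)

func main() {}
```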
+} + +// SendBatch sends a single RPC batch request to the server +func (c *Client) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + // Send the batch + responses, err := sendRequestCommon[types.RPCRequests, types.RPCResponses](ctx, c.client, c.rpcURL, requests) + if err != nil { + return nil, err + } + + // Make sure the length matches + if len(responses) != len(requests) { + return nil, ErrInvalidBatchResponse + } + + // Make sure the IDs match + for index, response := range responses { + if requests[index].ID != response.ID { + return nil, ErrRequestResponseIDMismatch + } + } + + return responses, nil +} + +// Close has no effect on an HTTP client +func (c *Client) Close() error { + return nil +} + +type ( + requestType interface { + types.RPCRequest | types.RPCRequests + } + + responseType interface { + *types.RPCResponse | types.RPCResponses + } +) + +// sendRequestCommon executes the common request sending +func sendRequestCommon[T requestType, R responseType]( + ctx context.Context, + client *http.Client, + rpcURL string, + request T, +) (R, error) { + // Marshal the request + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, fmt.Errorf("unable to JSON-marshal the request, %w", err) + } + + // Craft the request + req, err := http.NewRequest( + http.MethodPost, + rpcURL, + bytes.NewBuffer(requestBytes), + ) + if err != nil { + return nil, fmt.Errorf("unable to create request, %w", err) + } + + // Set the header content type + req.Header.Set("Content-Type", "application/json") + + // Execute the request + httpResponse, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("unable to send request, %w", err) + } + defer httpResponse.Body.Close() //nolint: errcheck + + // Parse the response code + if !isOKStatus(httpResponse.StatusCode) { + return nil, fmt.Errorf("invalid status code received, %d", httpResponse.StatusCode) + } + + // Parse the response body + responseBytes, err := io.ReadAll(httpResponse.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body, %w", err) + } + + var response R + + if err := json.Unmarshal(responseBytes, &response); err != nil { + return nil, fmt.Errorf("unable to unmarshal response body, %w", err) + } + + return response, nil +} + +// DefaultHTTPClient is used to create an http client with some default parameters. +// We overwrite the http.Client.Dial so we can do http over tcp or unix. +// remoteAddr should be fully featured (eg. 
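A minimal end-to-end sketch of the HTTP transport above, assuming a node serving JSON-RPC at the given URL (the address and the `status` method name are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/http"
	types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
)

func main() {
	c, err := rpchttp.NewClient("http://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	defer c.Close() //nolint: errcheck

	// Build a plain JSON-RPC request envelope.
	req, err := types.MapToRequest(
		types.JSONRPCStringID("example"),
		"status",
		map[string]interface{}{},
	)
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Single request; SendBatch works the same way with types.RPCRequests.
	resp, err := c.SendRequest(ctx, req)
	if err != nil {
		panic(err)
	}

	fmt.Println("response ID:", resp.ID, "error:", resp.Error)
}
```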
with tcp:// or unix://) +func defaultHTTPClient(remoteAddr string) *http.Client { + return &http.Client{ + Transport: &http.Transport{ + // Set to true to prevent GZIP-bomb DoS attacks + DisableCompression: true, + DialContext: func(_ context.Context, network, addr string) (net.Conn, error) { + return makeHTTPDialer(remoteAddr)(network, addr) + }, + }, + } +} + +func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) { + protocol, address, err := parseRemoteAddr(remoteAddr) + if err != nil { + return func(_ string, _ string) (net.Conn, error) { + return nil, err + } + } + + // net.Dial doesn't understand http/https, so change it to TCP + switch protocol { + case protoHTTP, protoHTTPS: + protocol = protoTCP + } + + return func(proto, addr string) (net.Conn, error) { + return net.Dial(protocol, address) + } +} + +// protocol - client's protocol (for example, "http", "https", "wss", "ws", "tcp") +// trimmedS - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") with "/" replaced with "." +func toClientAddrAndParse(remoteAddr string) (string, string, error) { + protocol, address, err := parseRemoteAddr(remoteAddr) + if err != nil { + return "", "", err + } + + // protocol to use for http operations, to support both http and https + var clientProtocol string + // default to http for unknown protocols (ex. tcp) + switch protocol { + case protoHTTP, protoHTTPS, protoWS, protoWSS: + clientProtocol = protocol + default: + clientProtocol = protoHTTP + } + + // replace / with . for http requests (kvstore domain) + trimmedAddress := strings.Replace(address, "/", ".", -1) + + return clientProtocol, trimmedAddress, nil +} + +func toClientAddress(remoteAddr string) (string, error) { + clientProtocol, trimmedAddress, err := toClientAddrAndParse(remoteAddr) + if err != nil { + return "", err + } + + return clientProtocol + "://" + trimmedAddress, nil +} + +// network - name of the network (for example, "tcp", "unix") +// s - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") +// TODO: Deprecate support for IP:PORT or /path/to/socket +func parseRemoteAddr(remoteAddr string) (network string, s string, err error) { + parts := strings.SplitN(remoteAddr, "://", 2) + var protocol, address string + switch len(parts) { + case 1: + // default to tcp if nothing specified + protocol, address = protoTCP, remoteAddr + case 2: + protocol, address = parts[0], parts[1] + } + return protocol, address, nil +} + +// isOKStatus returns a boolean indicating if the response +// status code is between 200 and 299 (inclusive) +func isOKStatus(code int) bool { return code >= 200 && code <= 299 } diff --git a/tm2/pkg/bft/rpc/lib/client/http/client_test.go b/tm2/pkg/bft/rpc/lib/client/http/client_test.go new file mode 100644 index 00000000000..7c4b1e52ac5 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/http/client_test.go @@ -0,0 +1,216 @@ +package http + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClient_parseRemoteAddr(t *testing.T) { + t.Parallel() + + testTable := []struct { + remoteAddr string + network string + rest string + }{ + { + "127.0.0.1", + "tcp", + "127.0.0.1", + }, + { + "https://example.com", + "https", + "example.com", + }, + { + "wss://[::1]", + "wss", + "[::1]", + }, + } + + for _, testCase := range testTable { + testCase := testCase + + 
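The address helpers above are unexported, so the following in-package sketch shows how a few remote address forms are normalized (the unix-socket case relies on the documented `/` to `.` substitution; the addresses are illustrative):

```go
package http

import "testing"

// Example of how a few remote address forms are normalized by the
// helpers above (in-package, since they are unexported).
func TestExample_addressNormalization(t *testing.T) {
	t.Parallel()

	for _, addr := range []string{
		"127.0.0.1:26657",           // no scheme: network tcp, dialed as http
		"https://rpc.example.com",   // https preserved
		"unix:///var/run/node.sock", // unix socket: served over http, "/" -> "."
	} {
		network, rest, err := parseRemoteAddr(addr)
		if err != nil {
			t.Fatalf("parse %q: %v", addr, err)
		}

		clientAddr, err := toClientAddress(addr)
		if err != nil {
			t.Fatalf("normalize %q: %v", addr, err)
		}

		t.Logf("%s -> network=%s rest=%s url=%s", addr, network, rest, clientAddr)
	}
}
```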
t.Run(testCase.remoteAddr, func(t *testing.T) { + t.Parallel() + + n, r, err := parseRemoteAddr(testCase.remoteAddr) + require.NoError(t, err) + + assert.Equal(t, n, testCase.network) + assert.Equal(t, r, testCase.rest) + }) + } +} + +// Following tests check that we correctly translate http/https to tcp, +// and other protocols are left intact from parseRemoteAddr() + +func TestClient_makeHTTPDialer(t *testing.T) { + t.Parallel() + + t.Run("http", func(t *testing.T) { + t.Parallel() + + _, err := makeHTTPDialer("https://.")("hello", "world") + require.Error(t, err) + + assert.Contains(t, err.Error(), "dial tcp:", "should convert https to tcp") + assert.Contains(t, err.Error(), "address .:", "should have parsed the address (as incorrect)") + }) + + t.Run("udp", func(t *testing.T) { + t.Parallel() + + _, err := makeHTTPDialer("udp://.")("hello", "world") + require.Error(t, err) + + assert.Contains(t, err.Error(), "dial udp:", "udp protocol should remain the same") + assert.Contains(t, err.Error(), "address .:", "should have parsed the address (as incorrect)") + }) +} + +// createTestServer creates a test HTTP server +func createTestServer( + t *testing.T, + handler http.Handler, +) *httptest.Server { + t.Helper() + + s := httptest.NewServer(handler) + t.Cleanup(s.Close) + + return s +} + +func TestClient_SendRequest(t *testing.T) { + t.Parallel() + + var ( + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + + handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, http.MethodPost, r.Method) + require.Equal(t, "application/json", r.Header.Get("content-type")) + + // Parse the message + var req types.RPCRequest + require.NoError(t, json.NewDecoder(r.Body).Decode(&req)) + require.Equal(t, request.ID.String(), req.ID.String()) + + // Send an empty response back + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: req.ID, + } + + // Marshal the response + marshalledResponse, err := json.Marshal(response) + require.NoError(t, err) + + _, err = w.Write(marshalledResponse) + require.NoError(t, err) + }) + + server = createTestServer(t, handler) + ) + + // Create the client + c, err := NewClient(server.URL) + require.NoError(t, err) + + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5) + defer cancelFn() + + // Send the request + resp, err := c.SendRequest(ctx, request) + require.NoError(t, err) + + assert.Equal(t, request.ID, resp.ID) + assert.Equal(t, request.JSONRPC, resp.JSONRPC) + assert.Nil(t, resp.Result) + assert.Nil(t, resp.Error) +} + +func TestClient_SendBatchRequest(t *testing.T) { + t.Parallel() + + var ( + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + + requests = types.RPCRequests{ + request, + request, + } + + handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, http.MethodPost, r.Method) + require.Equal(t, "application/json", r.Header.Get("content-type")) + + // Parse the message + var reqs types.RPCRequests + require.NoError(t, json.NewDecoder(r.Body).Decode(&reqs)) + require.Len(t, reqs, len(requests)) + + for _, req := range reqs { + require.Equal(t, request.ID.String(), req.ID.String()) + } + + // Send an empty response batch back + response := types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + } + + responses := types.RPCResponses{ + response, + response, + } + + // Marshal the responses + marshalledResponses, err := json.Marshal(responses) + require.NoError(t, err) + + _, err = 
w.Write(marshalledResponses) + require.NoError(t, err) + }) + + server = createTestServer(t, handler) + ) + + // Create the client + c, err := NewClient(server.URL) + require.NoError(t, err) + + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5) + defer cancelFn() + + // Send the request + resps, err := c.SendBatch(ctx, requests) + require.NoError(t, err) + + require.Len(t, resps, len(requests)) + + for _, resp := range resps { + assert.Equal(t, request.ID, resp.ID) + assert.Equal(t, request.JSONRPC, resp.JSONRPC) + assert.Nil(t, resp.Result) + assert.Nil(t, resp.Error) + } +} diff --git a/tm2/pkg/bft/rpc/lib/client/http_client.go b/tm2/pkg/bft/rpc/lib/client/http_client.go deleted file mode 100644 index c02d029f27a..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/http_client.go +++ /dev/null @@ -1,452 +0,0 @@ -package rpcclient - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - "sync" - - "github.com/gnolang/gno/tm2/pkg/amino" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/errors" - "github.com/gnolang/gno/tm2/pkg/random" -) - -const ( - protoHTTP = "http" - protoHTTPS = "https" - protoWSS = "wss" - protoWS = "ws" - protoTCP = "tcp" -) - -// HTTPClient is a common interface for JSONRPCClient and URIClient. -type HTTPClient interface { - Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) -} - -// protocol - client's protocol (for example, "http", "https", "wss", "ws", "tcp") -// trimmedS - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") with "/" replaced with "." -func toClientAddrAndParse(remoteAddr string) (network string, trimmedS string, err error) { - protocol, address, err := parseRemoteAddr(remoteAddr) - if err != nil { - return "", "", err - } - - // protocol to use for http operations, to support both http and https - var clientProtocol string - // default to http for unknown protocols (ex. tcp) - switch protocol { - case protoHTTP, protoHTTPS, protoWS, protoWSS: - clientProtocol = protocol - default: - clientProtocol = protoHTTP - } - - // replace / with . 
for http requests (kvstore domain) - trimmedAddress := strings.Replace(address, "/", ".", -1) - return clientProtocol, trimmedAddress, nil -} - -func toClientAddress(remoteAddr string) (string, error) { - clientProtocol, trimmedAddress, err := toClientAddrAndParse(remoteAddr) - if err != nil { - return "", err - } - return clientProtocol + "://" + trimmedAddress, nil -} - -// network - name of the network (for example, "tcp", "unix") -// s - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") -// TODO: Deprecate support for IP:PORT or /path/to/socket -func parseRemoteAddr(remoteAddr string) (network string, s string, err error) { - parts := strings.SplitN(remoteAddr, "://", 2) - var protocol, address string - switch { - case len(parts) == 1: - // default to tcp if nothing specified - protocol, address = protoTCP, remoteAddr - case len(parts) == 2: - protocol, address = parts[0], parts[1] - default: - return "", "", fmt.Errorf("invalid addr: %s", remoteAddr) - } - - return protocol, address, nil -} - -func makeErrorDialer(err error) func(string, string) (net.Conn, error) { - return func(_ string, _ string) (net.Conn, error) { - return nil, err - } -} - -func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) { - protocol, address, err := parseRemoteAddr(remoteAddr) - if err != nil { - return makeErrorDialer(err) - } - - // net.Dial doesn't understand http/https, so change it to TCP - switch protocol { - case protoHTTP, protoHTTPS: - protocol = protoTCP - } - - return func(proto, addr string) (net.Conn, error) { - return net.Dial(protocol, address) - } -} - -// DefaultHTTPClient is used to create an http client with some default parameters. -// We overwrite the http.Client.Dial so we can do http over tcp or unix. -// remoteAddr should be fully featured (eg. with tcp:// or unix://) -func DefaultHTTPClient(remoteAddr string) *http.Client { - return &http.Client{ - Transport: &http.Transport{ - // Set to true to prevent GZIP-bomb DoS attacks - DisableCompression: true, - Dial: makeHTTPDialer(remoteAddr), - }, - } -} - -// ------------------------------------------------------------------------------------ - -// jsonRPCBufferedRequest encapsulates a single buffered request, as well as its -// anticipated response structure. -type jsonRPCBufferedRequest struct { - request types.RPCRequest - result interface{} // The result will be deserialized into this object. -} - -// JSONRPCRequestBatch allows us to buffer multiple request/response structures -// into a single batch request. Note that this batch acts like a FIFO queue, and -// is thread-safe. -type JSONRPCRequestBatch struct { - client *JSONRPCClient - - mtx sync.Mutex - requests []*jsonRPCBufferedRequest -} - -// JSONRPCClient takes params as a slice -type JSONRPCClient struct { - address string - client *http.Client - id types.JSONRPCStringID -} - -// JSONRPCCaller implementers can facilitate calling the JSON RPC endpoint. -type JSONRPCCaller interface { - Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) -} - -// Both JSONRPCClient and JSONRPCRequestBatch can facilitate calls to the JSON -// RPC endpoint. -var ( - _ JSONRPCCaller = (*JSONRPCClient)(nil) - _ JSONRPCCaller = (*JSONRPCRequestBatch)(nil) -) - -// NewJSONRPCClient returns a JSONRPCClient pointed at the given address. 
-func NewJSONRPCClient(remote string) *JSONRPCClient { - return NewJSONRPCClientWithHTTPClient(remote, DefaultHTTPClient(remote)) -} - -// NewJSONRPCClientWithHTTPClient returns a JSONRPCClient pointed at the given address using a custom http client -// The function panics if the provided client is nil or remote is invalid. -func NewJSONRPCClientWithHTTPClient(remote string, client *http.Client) *JSONRPCClient { - if client == nil { - panic("nil http.Client provided") - } - - clientAddress, err := toClientAddress(remote) - if err != nil { - panic(fmt.Sprintf("invalid remote %s: %s", remote, err)) - } - - return &JSONRPCClient{ - address: clientAddress, - client: client, - id: types.JSONRPCStringID("jsonrpc-client-" + random.RandStr(8)), - } -} - -// Call will send the request for the given method through to the RPC endpoint -// immediately, without buffering of requests. -func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - request, err := types.MapToRequest(c.id, method, params) - if err != nil { - return nil, err - } - requestBytes, err := json.Marshal(request) - if err != nil { - return nil, err - } - requestBuf := bytes.NewBuffer(requestBytes) - httpResponse, err := c.client.Post(c.address, "text/json", requestBuf) - if err != nil { - return nil, err - } - defer httpResponse.Body.Close() //nolint: errcheck - - if !statusOK(httpResponse.StatusCode) { - return nil, errors.New("server at '%s' returned %s", c.address, httpResponse.Status) - } - - responseBytes, err := io.ReadAll(httpResponse.Body) - if err != nil { - return nil, err - } - return unmarshalResponseBytes(responseBytes, c.id, result) -} - -// NewRequestBatch starts a batch of requests for this client. -func (c *JSONRPCClient) NewRequestBatch() *JSONRPCRequestBatch { - return &JSONRPCRequestBatch{ - requests: make([]*jsonRPCBufferedRequest, 0), - client: c, - } -} - -func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interface{}, error) { - reqs := make([]types.RPCRequest, 0, len(requests)) - results := make([]interface{}, 0, len(requests)) - for _, req := range requests { - reqs = append(reqs, req.request) - results = append(results, req.result) - } - // serialize the array of requests into a single JSON object - requestBytes, err := json.Marshal(reqs) - if err != nil { - return nil, err - } - httpResponse, err := c.client.Post(c.address, "text/json", bytes.NewBuffer(requestBytes)) - if err != nil { - return nil, err - } - defer httpResponse.Body.Close() //nolint: errcheck - - if !statusOK(httpResponse.StatusCode) { - return nil, errors.New("server at '%s' returned %s", c.address, httpResponse.Status) - } - - responseBytes, err := io.ReadAll(httpResponse.Body) - if err != nil { - return nil, err - } - return unmarshalResponseBytesArray(responseBytes, c.id, results) -} - -// ------------------------------------------------------------- - -// Count returns the number of enqueued requests waiting to be sent. -func (b *JSONRPCRequestBatch) Count() int { - b.mtx.Lock() - defer b.mtx.Unlock() - return len(b.requests) -} - -func (b *JSONRPCRequestBatch) enqueue(req *jsonRPCBufferedRequest) { - b.mtx.Lock() - defer b.mtx.Unlock() - b.requests = append(b.requests, req) -} - -// Clear empties out the request batch. 
-func (b *JSONRPCRequestBatch) Clear() int { - b.mtx.Lock() - defer b.mtx.Unlock() - return b.clear() -} - -func (b *JSONRPCRequestBatch) clear() int { - count := len(b.requests) - b.requests = make([]*jsonRPCBufferedRequest, 0) - return count -} - -// Send will attempt to send the current batch of enqueued requests, and then -// will clear out the requests once done. On success, this returns the -// deserialized list of results from each of the enqueued requests. -func (b *JSONRPCRequestBatch) Send() ([]interface{}, error) { - b.mtx.Lock() - defer func() { - b.clear() - b.mtx.Unlock() - }() - return b.client.sendBatch(b.requests) -} - -// Call enqueues a request to call the given RPC method with the specified -// parameters, in the same way that the `JSONRPCClient.Call` function would. -func (b *JSONRPCRequestBatch) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - request, err := types.MapToRequest(b.client.id, method, params) - if err != nil { - return nil, err - } - b.enqueue(&jsonRPCBufferedRequest{request: request, result: result}) - return result, nil -} - -// ------------------------------------------------------------- - -// URI takes params as a map -type URIClient struct { - address string - client *http.Client -} - -// The function panics if the provided remote is invalid. -func NewURIClient(remote string) *URIClient { - clientAddress, err := toClientAddress(remote) - if err != nil { - panic(fmt.Sprintf("invalid remote %s: %s", remote, err)) - } - return &URIClient{ - address: clientAddress, - client: DefaultHTTPClient(remote), - } -} - -func (c *URIClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - values, err := argsToURLValues(params) - if err != nil { - return nil, err - } - // log.Info(Fmt("URI request to %v (%v): %v", c.address, method, values)) - resp, err := c.client.PostForm(c.address+"/"+method, values) - if err != nil { - return nil, err - } - defer resp.Body.Close() //nolint: errcheck - - if !statusOK(resp.StatusCode) { - return nil, errors.New("server at '%s' returned %s", c.address, resp.Status) - } - - responseBytes, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return unmarshalResponseBytes(responseBytes, "", result) -} - -// ------------------------------------------------ - -func unmarshalResponseBytes(responseBytes []byte, expectedID types.JSONRPCStringID, result interface{}) (interface{}, error) { - // Read response. If rpc/core/types is imported, the result will unmarshal - // into the correct type. - // log.Notice("response", "response", string(responseBytes)) - var err error - response := &types.RPCResponse{} - err = json.Unmarshal(responseBytes, response) - if err != nil { - return nil, errors.Wrap(err, "error unmarshalling rpc response") - } - if response.Error != nil { - return nil, errors.Wrap(response.Error, "response error") - } - // From the JSON-RPC 2.0 spec: - // id: It MUST be the same as the value of the id member in the Request Object. - if err := validateResponseID(response, expectedID); err != nil { - return nil, err - } - // Unmarshal the RawMessage into the result. 
- err = amino.UnmarshalJSON(response.Result, result) - if err != nil { - return nil, errors.Wrap(err, "error unmarshalling rpc response result") - } - return result, nil -} - -func unmarshalResponseBytesArray(responseBytes []byte, expectedID types.JSONRPCStringID, results []interface{}) ([]interface{}, error) { - var ( - err error - responses []types.RPCResponse - ) - err = json.Unmarshal(responseBytes, &responses) - if err != nil { - return nil, errors.Wrap(err, "error unmarshalling rpc response") - } - // No response error checking here as there may be a mixture of successful - // and unsuccessful responses. - - if len(results) != len(responses) { - return nil, errors.New("expected %d result objects into which to inject responses, but got %d", len(responses), len(results)) - } - - for i, response := range responses { - response := response - // From the JSON-RPC 2.0 spec: - // id: It MUST be the same as the value of the id member in the Request Object. - if err := validateResponseID(&response, expectedID); err != nil { - return nil, errors.Wrap(err, "failed to validate response ID in response %d", i) - } - if err := amino.UnmarshalJSON(responses[i].Result, results[i]); err != nil { - return nil, errors.Wrap(err, "error unmarshalling rpc response result") - } - } - return results, nil -} - -func validateResponseID(res *types.RPCResponse, expectedID types.JSONRPCStringID) error { - // we only validate a response ID if the expected ID is non-empty - if len(expectedID) == 0 { - return nil - } - if res.ID == nil { - return errors.New("missing ID in response") - } - id, ok := res.ID.(types.JSONRPCStringID) - if !ok { - return errors.New("expected ID string in response but got: %v", id) - } - if expectedID != id { - return errors.New("response ID (%s) does not match request ID (%s)", id, expectedID) - } - return nil -} - -func argsToURLValues(args map[string]interface{}) (url.Values, error) { - values := make(url.Values) - if len(args) == 0 { - return values, nil - } - err := argsToJSON(args) - if err != nil { - return nil, err - } - for key, val := range args { - values.Set(key, val.(string)) - } - return values, nil -} - -func argsToJSON(args map[string]interface{}) error { - for k, v := range args { - rt := reflect.TypeOf(v) - isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 - if isByteSlice { - bytes := reflect.ValueOf(v).Bytes() - args[k] = fmt.Sprintf("0x%X", bytes) - continue - } - - data, err := amino.MarshalJSON(v) - if err != nil { - return err - } - args[k] = string(data) - } - return nil -} - -func statusOK(code int) bool { return code >= 200 && code <= 299 } diff --git a/tm2/pkg/bft/rpc/lib/client/http_client_test.go b/tm2/pkg/bft/rpc/lib/client/http_client_test.go deleted file mode 100644 index 476f2857fa6..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/http_client_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package rpcclient - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_parseRemoteAddr(t *testing.T) { - t.Parallel() - - tt := []struct { - remoteAddr string - network, s, errContains string - }{ - {"127.0.0.1", "tcp", "127.0.0.1", ""}, - {"https://example.com", "https", "example.com", ""}, - {"wss://[::1]", "wss", "[::1]", ""}, - // no error cases - they cannot happen! 
- } - - for _, tc := range tt { - n, s, err := parseRemoteAddr(tc.remoteAddr) - if tc.errContains != "" { - _ = assert.NotNil(t, err) && assert.Contains(t, err.Error(), tc.errContains) - } - assert.NoError(t, err) - assert.Equal(t, tc.network, n) - assert.Equal(t, tc.s, s) - } -} - -// Following tests check that we correctly translate http/https to tcp, -// and other protocols are left intact from parseRemoteAddr() - -func Test_makeHTTPDialer(t *testing.T) { - t.Parallel() - - dl := makeHTTPDialer("https://.") - _, err := dl("hello", "world") - if assert.NotNil(t, err) { - e := err.Error() - assert.Contains(t, e, "dial tcp:", "should convert https to tcp") - assert.Contains(t, e, "address .:", "should have parsed the address (as incorrect)") - } -} - -func Test_makeHTTPDialer_noConvert(t *testing.T) { - t.Parallel() - - dl := makeHTTPDialer("udp://.") - _, err := dl("hello", "world") - if assert.NotNil(t, err) { - e := err.Error() - assert.Contains(t, e, "dial udp:", "udp protocol should remain the same") - assert.Contains(t, e, "address .:", "should have parsed the address (as incorrect)") - } -} diff --git a/tm2/pkg/bft/rpc/lib/client/integration_test.go b/tm2/pkg/bft/rpc/lib/client/integration_test.go deleted file mode 100644 index 85b4c94594b..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/integration_test.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build release - -// The code in here is comprehensive as an integration -// test and is long, hence is only run before releases. - -package rpcclient - -import ( - "bytes" - "errors" - "net" - "regexp" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/log" -) - -func TestWSClientReconnectWithJitter(t *testing.T) { - t.Parallel() - - n := 8 - maxReconnectAttempts := 3 - // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... 
- maxSleepTime := time.Second * time.Duration(((1< server write routine +func (c *Client) runWriteRoutine(ctx context.Context) { + for { + select { + case <-ctx.Done(): + c.logger.Debug("write context finished") + + return + case item := <-c.backlog: + // Write the JSON request to the server + if err := c.conn.WriteJSON(item); err != nil { + c.logger.Error("unable to send request", "err", err) + + continue + } + + c.logger.Debug("successfully sent request", "request", item) + } + } +} + +// runReadRoutine runs the client <- server read routine +func (c *Client) runReadRoutine(ctx context.Context) { + for { + select { + case <-ctx.Done(): + c.logger.Debug("read context finished") + + return + default: + } + + // Read the message from the active connection + _, data, err := c.conn.ReadMessage() + if err != nil { + if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { + c.logger.Error("failed to read response", "err", err) + + // Server dropped the connection, stop the client + if err = c.closeWithCause( + fmt.Errorf("server closed connection, %w", err), + ); err != nil { + c.logger.Error("unable to gracefully close client", "err", err) + } + + return + } + + continue + } + + var ( + responses types.RPCResponses + responseHash string + ) + + // Try to unmarshal as a batch of responses first + if err := json.Unmarshal(data, &responses); err != nil { + // Try to unmarshal as a single response + var response types.RPCResponse + + if err := json.Unmarshal(data, &response); err != nil { + c.logger.Error("failed to parse response", "err", err, "data", string(data)) + + continue + } + + // This is a single response, generate the unique ID + responseHash = generateIDHash(response.ID.String()) + responses = types.RPCResponses{response} + } else { + // This is a batch response, generate the unique ID + // from the combined IDs + ids := make([]string, 0, len(responses)) + + for _, response := range responses { + ids = append(ids, response.ID.String()) + } + + responseHash = generateIDHash(ids...) 
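The `generateIDHash` helper used here is not part of this excerpt. Purely as an illustration of the idea, which is to derive one deterministic map key from either a single response ID or an ordered batch of IDs so both cases share the same lookup scheme, a stand-in could look like the sketch below (the name and hashing choice are assumptions, not the actual implementation):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// idHash is an illustrative stand-in for a generateIDHash-style helper:
// it maps one or more request IDs, in order, to a single deterministic key.
func idHash(ids ...string) string {
	sum := sha256.Sum256([]byte(strings.Join(ids, "/")))

	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(idHash("1"))           // key for a single request
	fmt.Println(idHash("1", "2", "3")) // key for a batch
}
```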
+ } + + // Grab the response channel + c.requestMapMux.Lock() + ch := c.requestMap[responseHash] + if ch == nil { + c.requestMapMux.Unlock() + c.logger.Error("response listener not set", "hash", responseHash, "responses", responses) + + continue + } + + // Clear the entry for this ID + delete(c.requestMap, responseHash) + c.requestMapMux.Unlock() + + c.logger.Debug("received response", "hash", responseHash) + + // Alert the listener of the response + select { + case ch <- responses: + default: + c.logger.Warn("response listener timed out", "hash", responseHash) + } + } +} + +// Close closes the WS client +func (c *Client) Close() error { + return c.closeWithCause(nil) +} + +// closeWithCause closes the client (and any open connection) +// with the given cause +func (c *Client) closeWithCause(err error) error { + c.cancelCauseFn(err) + + return c.conn.Close() +} diff --git a/tm2/pkg/bft/rpc/lib/client/ws/client_test.go b/tm2/pkg/bft/rpc/lib/client/ws/client_test.go new file mode 100644 index 00000000000..c80b98b624f --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/ws/client_test.go @@ -0,0 +1,302 @@ +package ws + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// createTestServer creates a test WS server +func createTestServer( + t *testing.T, + handler http.Handler, +) *httptest.Server { + t.Helper() + + s := httptest.NewServer(handler) + t.Cleanup(s.Close) + + return s +} + +func TestClient_SendRequest(t *testing.T) { + t.Parallel() + + t.Run("request timed out", func(t *testing.T) { + t.Parallel() + + var ( + upgrader = websocket.Upgrader{} + + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + ) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + // Create the server + handler := func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + require.NoError(t, err) + + defer c.Close() + + for { + _, message, err := c.ReadMessage() + if websocket.IsUnexpectedCloseError(err) { + return + } + + require.NoError(t, err) + + // Parse the message + var req types.RPCRequest + require.NoError(t, json.Unmarshal(message, &req)) + require.Equal(t, request.ID.String(), req.ID.String()) + + // Simulate context cancellation mid-request parsing + cancelFn() + } + } + + s := createTestServer(t, http.HandlerFunc(handler)) + url := "ws" + strings.TrimPrefix(s.URL, "http") + + // Create the client + c, err := NewClient(url) + require.NoError(t, err) + + defer func() { + assert.NoError(t, c.Close()) + }() + + // Try to send the request, but wait for + // the context to be cancelled + response, err := c.SendRequest(ctx, request) + require.Nil(t, response) + + assert.ErrorIs(t, err, ErrTimedOut) + }) + + t.Run("valid request sent", func(t *testing.T) { + t.Parallel() + + var ( + upgrader = websocket.Upgrader{} + + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + + response = types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + } + ) + + // Create the server + handler := func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + require.NoError(t, err) + + defer c.Close() + + for { + mt, message, err := c.ReadMessage() + if websocket.IsUnexpectedCloseError(err) { + return + } + + require.NoError(t, err) + + // 
Parse the message + var req types.RPCRequest + require.NoError(t, json.Unmarshal(message, &req)) + require.Equal(t, request.ID.String(), req.ID.String()) + + marshalledResponse, err := json.Marshal(response) + require.NoError(t, err) + + require.NoError(t, c.WriteMessage(mt, marshalledResponse)) + } + } + + s := createTestServer(t, http.HandlerFunc(handler)) + url := "ws" + strings.TrimPrefix(s.URL, "http") + + // Create the client + c, err := NewClient(url) + require.NoError(t, err) + + defer func() { + assert.NoError(t, c.Close()) + }() + + // Try to send the valid request + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5) + defer cancelFn() + + resp, err := c.SendRequest(ctx, request) + require.NoError(t, err) + + assert.Equal(t, response.ID, resp.ID) + assert.Equal(t, response.JSONRPC, resp.JSONRPC) + assert.Equal(t, response.Result, resp.Result) + assert.Equal(t, response.Error, resp.Error) + }) +} + +func TestClient_SendBatch(t *testing.T) { + t.Parallel() + + t.Run("batch timed out", func(t *testing.T) { + t.Parallel() + + var ( + upgrader = websocket.Upgrader{} + + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + + batch = types.RPCRequests{request} + ) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + // Create the server + handler := func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + require.NoError(t, err) + + defer c.Close() + + for { + _, message, err := c.ReadMessage() + if websocket.IsUnexpectedCloseError(err) { + return + } + + require.NoError(t, err) + + // Parse the message + var req types.RPCRequests + require.NoError(t, json.Unmarshal(message, &req)) + + require.Len(t, req, 1) + require.Equal(t, request.ID.String(), req[0].ID.String()) + + // Simulate context cancellation mid-request parsing + cancelFn() + } + } + + s := createTestServer(t, http.HandlerFunc(handler)) + url := "ws" + strings.TrimPrefix(s.URL, "http") + + // Create the client + c, err := NewClient(url) + require.NoError(t, err) + + defer func() { + assert.NoError(t, c.Close()) + }() + + // Try to send the request, but wait for + // the context to be cancelled + response, err := c.SendBatch(ctx, batch) + require.Nil(t, response) + + assert.ErrorIs(t, err, ErrTimedOut) + }) + + t.Run("valid batch sent", func(t *testing.T) { + t.Parallel() + + var ( + upgrader = websocket.Upgrader{} + + request = types.RPCRequest{ + JSONRPC: "2.0", + ID: types.JSONRPCStringID("id"), + } + + response = types.RPCResponse{ + JSONRPC: "2.0", + ID: request.ID, + } + + batch = types.RPCRequests{request} + batchResponse = types.RPCResponses{response} + ) + + // Create the server + handler := func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + require.NoError(t, err) + + defer c.Close() + + for { + mt, message, err := c.ReadMessage() + if websocket.IsUnexpectedCloseError(err) { + return + } + + require.NoError(t, err) + + // Parse the message + var req types.RPCRequests + require.NoError(t, json.Unmarshal(message, &req)) + + require.Len(t, req, 1) + require.Equal(t, request.ID.String(), req[0].ID.String()) + + marshalledResponse, err := json.Marshal(batchResponse) + require.NoError(t, err) + + require.NoError(t, c.WriteMessage(mt, marshalledResponse)) + } + } + + s := createTestServer(t, http.HandlerFunc(handler)) + url := "ws" + strings.TrimPrefix(s.URL, "http") + + // Create the client + c, err := NewClient(url) + require.NoError(t, err) + + defer func() { + 
assert.NoError(t, c.Close()) + }() + + // Try to send the valid request + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5) + defer cancelFn() + + resp, err := c.SendBatch(ctx, batch) + require.NoError(t, err) + + require.Len(t, resp, 1) + + assert.Equal(t, response.ID, resp[0].ID) + assert.Equal(t, response.JSONRPC, resp[0].JSONRPC) + assert.Equal(t, response.Result, resp[0].Result) + assert.Equal(t, response.Error, resp[0].Error) + }) +} diff --git a/tm2/pkg/bft/rpc/lib/client/ws/options.go b/tm2/pkg/bft/rpc/lib/client/ws/options.go new file mode 100644 index 00000000000..c98e8923b22 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/ws/options.go @@ -0,0 +1,14 @@ +package ws + +import ( + "log/slog" +) + +type Option func(*Client) + +// WithLogger sets the WS client logger +func WithLogger(logger *slog.Logger) Option { + return func(c *Client) { + c.logger = logger + } +} diff --git a/tm2/pkg/bft/rpc/lib/client/ws/options_test.go b/tm2/pkg/bft/rpc/lib/client/ws/options_test.go new file mode 100644 index 00000000000..2378b346b83 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/client/ws/options_test.go @@ -0,0 +1,38 @@ +package ws + +import ( + "io" + "log/slog" + "net/http" + "strings" + "testing" + + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClient_WithLogger(t *testing.T) { + t.Parallel() + + var ( + upgrader = websocket.Upgrader{} + + handler = func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + + require.NoError(t, err) + require.NoError(t, c.Close()) + } + ) + + s := createTestServer(t, http.HandlerFunc(handler)) + url := "ws" + strings.TrimPrefix(s.URL, "http") + + // Create the client + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + c, err := NewClient(url, WithLogger(logger)) + require.NoError(t, err) + + assert.Equal(t, logger, c.logger) +} diff --git a/tm2/pkg/bft/rpc/lib/client/ws_client.go b/tm2/pkg/bft/rpc/lib/client/ws_client.go deleted file mode 100644 index 4e159a3e3dc..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/ws_client.go +++ /dev/null @@ -1,467 +0,0 @@ -package rpcclient - -import ( - "context" - "encoding/json" - "fmt" - "net" - "net/http" - "sync" - "time" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/errors" - "github.com/gnolang/gno/tm2/pkg/random" - "github.com/gnolang/gno/tm2/pkg/service" - "github.com/gorilla/websocket" -) - -const ( - defaultMaxReconnectAttempts = 25 - defaultWriteWait = 0 - defaultReadWait = 0 - defaultPingPeriod = 0 -) - -// WSClient is a WebSocket client. The methods of WSClient are safe for use by -// multiple goroutines. -type WSClient struct { - service.BaseService - - conn *websocket.Conn - - Address string // IP:PORT or /path/to/socket - Endpoint string // /websocket/url/endpoint - Dialer func(string, string) (net.Conn, error) - - // Single user facing channel to read RPCResponses from, closed only when the client is being stopped. - ResponsesCh chan types.RPCResponse - - // Callback, which will be called each time after successful reconnect. 
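Tying the new `ws` package together, a minimal usage sketch of the client plus the `WithLogger` option added above (the endpoint is illustrative, and the request is the same bare JSON-RPC envelope the tests use):

```go
package main

import (
	"context"
	"log/slog"
	"os"
	"time"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/ws"
	types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// The websocket endpoint is illustrative.
	c, err := ws.NewClient("ws://127.0.0.1:26657/websocket", ws.WithLogger(logger))
	if err != nil {
		panic(err)
	}

	defer func() {
		_ = c.Close()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := c.SendRequest(ctx, types.RPCRequest{
		JSONRPC: "2.0",
		ID:      types.JSONRPCStringID("example"),
	})
	if err != nil {
		panic(err)
	}

	logger.Info("got response", "id", resp.ID)
}
```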
- onReconnect func() - - // internal channels - send chan types.RPCRequest // user requests - backlog chan types.RPCRequest // stores a single user request received during a conn failure - reconnectAfter chan error // reconnect requests - readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine - - wg sync.WaitGroup - - mtx sync.RWMutex - sentLastPingAt time.Time - reconnecting bool - - // Maximum reconnect attempts (0 or greater; default: 25). - maxReconnectAttempts int - - // Time allowed to write a message to the server. 0 means block until operation succeeds. - writeWait time.Duration - - // Time allowed to read the next message from the server. 0 means block until operation succeeds. - readWait time.Duration - - // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. - pingPeriod time.Duration - - // Support both ws and wss protocols - protocol string -} - -// NewWSClient returns a new client. See the commentary on the func(*WSClient) -// functions for a detailed description of how to configure ping period and -// pong wait time. The endpoint argument must begin with a `/`. -// The function panics if the provided address is invalid. -func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient { - protocol, addr, err := toClientAddrAndParse(remoteAddr) - if err != nil { - panic(fmt.Sprintf("invalid remote %s: %s", remoteAddr, err)) - } - // default to ws protocol, unless wss is explicitly specified - if protocol != "wss" { - protocol = "ws" - } - - c := &WSClient{ - Address: addr, - Dialer: makeHTTPDialer(remoteAddr), - Endpoint: endpoint, - - maxReconnectAttempts: defaultMaxReconnectAttempts, - readWait: defaultReadWait, - writeWait: defaultWriteWait, - pingPeriod: defaultPingPeriod, - protocol: protocol, - } - c.BaseService = *service.NewBaseService(nil, "WSClient", c) - for _, option := range options { - option(c) - } - return c -} - -// MaxReconnectAttempts sets the maximum number of reconnect attempts before returning an error. -// It should only be used in the constructor and is not Goroutine-safe. -func MaxReconnectAttempts(max int) func(*WSClient) { - return func(c *WSClient) { - c.maxReconnectAttempts = max - } -} - -// ReadWait sets the amount of time to wait before a websocket read times out. -// It should only be used in the constructor and is not Goroutine-safe. -func ReadWait(readWait time.Duration) func(*WSClient) { - return func(c *WSClient) { - c.readWait = readWait - } -} - -// WriteWait sets the amount of time to wait before a websocket write times out. -// It should only be used in the constructor and is not Goroutine-safe. -func WriteWait(writeWait time.Duration) func(*WSClient) { - return func(c *WSClient) { - c.writeWait = writeWait - } -} - -// PingPeriod sets the duration for sending websocket pings. -// It should only be used in the constructor - not Goroutine-safe. -func PingPeriod(pingPeriod time.Duration) func(*WSClient) { - return func(c *WSClient) { - c.pingPeriod = pingPeriod - } -} - -// OnReconnect sets the callback, which will be called every time after -// successful reconnect. -func OnReconnect(cb func()) func(*WSClient) { - return func(c *WSClient) { - c.onReconnect = cb - } -} - -// String returns WS client full address. -func (c *WSClient) String() string { - return fmt.Sprintf("%s (%s)", c.Address, c.Endpoint) -} - -// OnStart implements service.Service by dialing a server and creating read and -// write routines. 
-func (c *WSClient) OnStart() error { - err := c.dial() - if err != nil { - return err - } - - c.ResponsesCh = make(chan types.RPCResponse) - - c.send = make(chan types.RPCRequest) - // 1 additional error may come from the read/write - // goroutine depending on which failed first. - c.reconnectAfter = make(chan error, 1) - // capacity for 1 request. a user won't be able to send more because the send - // channel is unbuffered. - c.backlog = make(chan types.RPCRequest, 1) - - c.startReadWriteRoutines() - go c.reconnectRoutine() - - return nil -} - -// Stop overrides service.Service#Stop. There is no other way to wait until Quit -// channel is closed. -func (c *WSClient) Stop() error { - if err := c.BaseService.Stop(); err != nil { - return err - } - // only close user-facing channels when we can't write to them - c.wg.Wait() - close(c.ResponsesCh) - - return nil -} - -// IsReconnecting returns true if the client is reconnecting right now. -func (c *WSClient) IsReconnecting() bool { - c.mtx.RLock() - defer c.mtx.RUnlock() - return c.reconnecting -} - -// IsActive returns true if the client is running and not reconnecting. -func (c *WSClient) IsActive() bool { - return c.IsRunning() && !c.IsReconnecting() -} - -// Send the given RPC request to the server. Results will be available on -// ResponsesCh, errors, if any, on ErrorsCh. Will block until send succeeds or -// ctx.Done is closed. -func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { - select { - case c.send <- request: - c.Logger.Info("sent a request", "req", request) - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Call the given method. See Send description. -func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error { - request, err := types.MapToRequest(types.JSONRPCStringID("ws-client"), method, params) - if err != nil { - return err - } - return c.Send(ctx, request) -} - -// CallWithArrayParams the given method with params in a form of array. See -// Send description. -func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error { - request, err := types.ArrayToRequest(types.JSONRPCStringID("ws-client"), method, params) - if err != nil { - return err - } - return c.Send(ctx, request) -} - -// ----------- -// Private methods - -func (c *WSClient) dial() error { - dialer := &websocket.Dialer{ - NetDial: c.Dialer, - Proxy: http.ProxyFromEnvironment, - } - rHeader := http.Header{} - conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) - if err != nil { - return err - } - c.conn = conn - return nil -} - -// reconnect tries to redial up to maxReconnectAttempts with exponential -// backoff. 
-func (c *WSClient) reconnect() error { - attempt := 0 - - c.mtx.Lock() - c.reconnecting = true - c.mtx.Unlock() - defer func() { - c.mtx.Lock() - c.reconnecting = false - c.mtx.Unlock() - }() - - for { - jitter := time.Duration(random.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempt)) * time.Second) - - c.Logger.Info("reconnecting", "attempt", attempt+1, "backoff_duration", backoffDuration) - time.Sleep(backoffDuration) - - err := c.dial() - if err != nil { - c.Logger.Error("failed to redial", "err", err) - } else { - c.Logger.Info("reconnected") - if c.onReconnect != nil { - go c.onReconnect() - } - return nil - } - - attempt++ - - if attempt > c.maxReconnectAttempts { - return errors.Wrap(err, "reached maximum reconnect attempts") - } - } -} - -func (c *WSClient) startReadWriteRoutines() { - c.wg.Add(2) - c.readRoutineQuit = make(chan struct{}) - go c.readRoutine() - go c.writeRoutine() -} - -func (c *WSClient) processBacklog() error { - select { - case request := <-c.backlog: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteJSON(request); err != nil { - c.Logger.Error("failed to resend request", "err", err) - c.reconnectAfter <- err - // requeue request - c.backlog <- request - return err - } - c.Logger.Info("resend a request", "req", request) - default: - } - return nil -} - -func (c *WSClient) reconnectRoutine() { - for { - select { - case originalError := <-c.reconnectAfter: - // wait until writeRoutine and readRoutine finish - c.wg.Wait() - if err := c.reconnect(); err != nil { - c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) - c.Stop() - return - } - // drain reconnectAfter - LOOP: - for { - select { - case <-c.reconnectAfter: - default: - break LOOP - } - } - err := c.processBacklog() - if err == nil { - c.startReadWriteRoutines() - } - - case <-c.Quit(): - return - } - } -} - -// The client ensures that there is at most one writer to a connection by -// executing all writes from this goroutine. 
-func (c *WSClient) writeRoutine() { - var ticker *time.Ticker - if c.pingPeriod > 0 { - // ticker with a predefined period - ticker = time.NewTicker(c.pingPeriod) - } else { - // ticker that never fires - ticker = &time.Ticker{C: make(<-chan time.Time)} - } - - defer func() { - ticker.Stop() - c.conn.Close() - // err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - // } - c.wg.Done() - }() - - for { - select { - case request := <-c.send: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteJSON(request); err != nil { - c.Logger.Error("failed to send request", "err", err) - c.reconnectAfter <- err - // add request to the backlog, so we don't lose it - c.backlog <- request - return - } - case <-ticker.C: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - c.Logger.Error("failed to write ping", "err", err) - c.reconnectAfter <- err - return - } - c.mtx.Lock() - c.sentLastPingAt = time.Now() - c.mtx.Unlock() - c.Logger.Debug("sent ping") - case <-c.readRoutineQuit: - return - case <-c.Quit(): - if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { - c.Logger.Error("failed to write message", "err", err) - } - return - } - } -} - -// The client ensures that there is at most one reader to a connection by -// executing all reads from this goroutine. -func (c *WSClient) readRoutine() { - defer func() { - c.conn.Close() - // err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - // } - c.wg.Done() - }() - - c.conn.SetPongHandler(func(string) error { - /* - TODO latency metrics - // gather latency stats - c.mtx.RLock() - t := c.sentLastPingAt - c.mtx.RUnlock() - */ - - c.Logger.Debug("got pong") - return nil - }) - - for { - // reset deadline for every message type (control or data) - if c.readWait > 0 { - if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil { - c.Logger.Error("failed to set read deadline", "err", err) - } - } - _, data, err := c.conn.ReadMessage() - if err != nil { - if !websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { - return - } - - c.Logger.Error("failed to read response", "err", err) - close(c.readRoutineQuit) - c.reconnectAfter <- err - return - } - - var response types.RPCResponse - err = json.Unmarshal(data, &response) - if err != nil { - c.Logger.Error("failed to parse response", "err", err, "data", string(data)) - continue - } - c.Logger.Info("got response", "resp", response.Result) - // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking - // c.wg.Wait() in c.Stop(). 
Note we rely on Quit being closed so that it sends unlimited Quit signals to stop - // both readRoutine and writeRoutine - select { - case <-c.Quit(): - case c.ResponsesCh <- response: - } - } -} diff --git a/tm2/pkg/bft/rpc/lib/client/ws_client_test.go b/tm2/pkg/bft/rpc/lib/client/ws_client_test.go deleted file mode 100644 index c902ee709e0..00000000000 --- a/tm2/pkg/bft/rpc/lib/client/ws_client_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package rpcclient - -import ( - "context" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "github.com/gorilla/websocket" - "github.com/stretchr/testify/require" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/log" -) - -var wsCallTimeout = 5 * time.Second - -type myHandler struct { - closeConnAfterRead bool - mtx sync.RWMutex -} - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - panic(err) - } - defer conn.Close() //nolint: errcheck - for { - messageType, _, err := conn.ReadMessage() - if err != nil { - return - } - - h.mtx.RLock() - if h.closeConnAfterRead { - if err := conn.Close(); err != nil { - panic(err) - } - } - h.mtx.RUnlock() - - res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res}) - if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { - return - } - } -} - -func TestWSClientReconnectsAfterReadFailure(t *testing.T) { - t.Parallel() - - var wg sync.WaitGroup - - // start server - h := &myHandler{} - s := httptest.NewServer(h) - defer s.Close() - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - wg.Add(1) - go callWgDoneOnResult(t, c, &wg) - - h.mtx.Lock() - h.closeConnAfterRead = true - h.mtx.Unlock() - - // results in WS read error, no send retry because write succeeded - call(t, "a", c) - - // expect to reconnect almost immediately - time.Sleep(10 * time.Millisecond) - h.mtx.Lock() - h.closeConnAfterRead = false - h.mtx.Unlock() - - // should succeed - call(t, "b", c) - - wg.Wait() -} - -func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { - t.Parallel() - - var wg sync.WaitGroup - - // start server - h := &myHandler{} - s := httptest.NewServer(h) - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - wg.Add(2) - go callWgDoneOnResult(t, c, &wg) - - // hacky way to abort the connection before write - if err := c.conn.Close(); err != nil { - t.Error(err) - } - - // results in WS write error, the client should resend on reconnect - call(t, "a", c) - - // expect to reconnect almost immediately - time.Sleep(10 * time.Millisecond) - - // should succeed - call(t, "b", c) - - wg.Wait() -} - -func TestWSClientReconnectFailure(t *testing.T) { - t.Parallel() - - // start server - h := &myHandler{} - s := httptest.NewServer(h) - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - go func() { - for { - select { - case <-c.ResponsesCh: - case <-c.Quit(): - return - } - } - }() - - // hacky way to abort the connection before write - if err := c.conn.Close(); err != nil { - t.Error(err) - } - s.Close() - - // results in WS write error - // provide timeout to avoid blocking - ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout) - defer cancel() - if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil { - t.Error(err) - } - - // expect to reconnect almost 
immediately - time.Sleep(10 * time.Millisecond) - - done := make(chan struct{}) - go func() { - // client should block on this - call(t, "b", c) - close(done) - }() - - // test that client blocks on the second send - select { - case <-done: - t.Fatal("client should block on calling 'b' during reconnect") - case <-time.After(5 * time.Second): - t.Log("All good") - } -} - -func TestNotBlockingOnStop(t *testing.T) { - t.Parallel() - - timeout := 2 * time.Second - s := httptest.NewServer(&myHandler{}) - c := startClient(t, s.Listener.Addr()) - c.Call(context.Background(), "a", make(map[string]interface{})) - // Let the readRoutine get around to blocking - time.Sleep(time.Second) - passCh := make(chan struct{}) - go func() { - // Unless we have a non-blocking write to ResponsesCh from readRoutine - // this blocks forever ont the waitgroup - c.Stop() - passCh <- struct{}{} - }() - select { - case <-passCh: - // Pass - case <-time.After(timeout): - t.Fatalf("WSClient did failed to stop within %v seconds - is one of the read/write routines blocking?", - timeout.Seconds()) - } -} - -func startClient(t *testing.T, addr net.Addr) *WSClient { - t.Helper() - - c := NewWSClient(addr.String(), "/websocket") - err := c.Start() - require.Nil(t, err) - c.SetLogger(log.NewTestingLogger(t)) - return c -} - -func call(t *testing.T, method string, c *WSClient) { - t.Helper() - - err := c.Call(context.Background(), method, make(map[string]interface{})) - require.NoError(t, err) -} - -func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { - t.Helper() - - for { - select { - case resp := <-c.ResponsesCh: - if resp.Error != nil { - t.Errorf("unexpected error: %v", resp.Error) - return - } - if resp.Result != nil { - wg.Done() - } - case <-c.Quit(): - return - } - } -} diff --git a/tm2/pkg/bft/rpc/lib/rpc_test.go b/tm2/pkg/bft/rpc/lib/rpc_test.go deleted file mode 100644 index 386e641cb53..00000000000 --- a/tm2/pkg/bft/rpc/lib/rpc_test.go +++ /dev/null @@ -1,395 +0,0 @@ -package rpc - -import ( - "bytes" - "context" - crand "crypto/rand" - "encoding/json" - "fmt" - "net/http" - "os" - "os/exec" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/log" - "github.com/gnolang/gno/tm2/pkg/random" - - client "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" - server "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Client and Server should work over tcp or unix sockets -const ( - tcpAddr = "tcp://0.0.0.0:47768" - tcpServerUnavailableAddr = "tcp://0.0.0.0:47769" - - unixSocket = "/tmp/rpc_test.sock" - unixAddr = "unix://" + unixSocket - - websocketEndpoint = "/websocket/endpoint" - - testVal = "acbd" -) - -type ResultEcho struct { - Value string `json:"value"` -} - -type ResultEchoInt struct { - Value int `json:"value"` -} - -type ResultEchoBytes struct { - Value []byte `json:"value"` -} - -type ResultEchoDataBytes struct { - Value []byte `json:"value"` -} - -// Define some routes -var Routes = map[string]*server.RPCFunc{ - "echo": server.NewRPCFunc(EchoResult, "arg"), - "echo_ws": server.NewWSRPCFunc(EchoWSResult, "arg"), - "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"), - "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"), - "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), -} - -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { - return &ResultEcho{v}, nil -} - -func EchoWSResult(ctx *types.Context, v 
string) (*ResultEcho, error) { - return &ResultEcho{v}, nil -} - -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { - return &ResultEchoInt{v}, nil -} - -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { - return &ResultEchoBytes{v}, nil -} - -func EchoDataBytesResult(ctx *types.Context, v []byte) (*ResultEchoDataBytes, error) { - return &ResultEchoDataBytes{v}, nil -} - -func TestMain(m *testing.M) { - setup() - code := m.Run() - os.Exit(code) -} - -// launch unix and tcp servers -func setup() { - logger := log.NewNoopLogger() - - cmd := exec.Command("rm", "-f", unixSocket) - err := cmd.Start() - if err != nil { - panic(err) - } - if err = cmd.Wait(); err != nil { - panic(err) - } - - tcpLogger := logger.With("socket", "tcp") - mux := http.NewServeMux() - server.RegisterRPCFuncs(mux, Routes, tcpLogger) - wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) - wm.SetLogger(tcpLogger) - mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - config := server.DefaultConfig() - listener1, err := server.Listen(tcpAddr, config) - if err != nil { - panic(err) - } - go server.StartHTTPServer(listener1, mux, tcpLogger, config) - - unixLogger := logger.With("socket", "unix") - mux2 := http.NewServeMux() - server.RegisterRPCFuncs(mux2, Routes, unixLogger) - wm = server.NewWebsocketManager(Routes) - wm.SetLogger(unixLogger) - mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener2, err := server.Listen(unixAddr, config) - if err != nil { - panic(err) - } - go server.StartHTTPServer(listener2, mux2, unixLogger, config) - - listener3, err := server.Listen(tcpServerUnavailableAddr, config) - if err != nil { - panic(err) - } - mux3 := http.NewServeMux() - mux3.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "oups", http.StatusTeapot) - }) - go server.StartHTTPServer(listener3, mux3, tcpLogger, config) - - // wait for servers to start - time.Sleep(time.Second * 2) -} - -func echoViaHTTP(cl client.HTTPClient, val string) (string, error) { - params := map[string]interface{}{ - "arg": val, - } - result := new(ResultEcho) - if _, err := cl.Call("echo", params, result); err != nil { - return "", err - } - return result.Value, nil -} - -func echoIntViaHTTP(cl client.HTTPClient, val int) (int, error) { - params := map[string]interface{}{ - "arg": val, - } - result := new(ResultEchoInt) - if _, err := cl.Call("echo_int", params, result); err != nil { - return 0, err - } - return result.Value, nil -} - -func echoBytesViaHTTP(cl client.HTTPClient, bytes []byte) ([]byte, error) { - params := map[string]interface{}{ - "arg": bytes, - } - result := new(ResultEchoBytes) - if _, err := cl.Call("echo_bytes", params, result); err != nil { - return []byte{}, err - } - return result.Value, nil -} - -func echoDataBytesViaHTTP(cl client.HTTPClient, bytes []byte) ([]byte, error) { - params := map[string]interface{}{ - "arg": bytes, - } - result := new(ResultEchoDataBytes) - if _, err := cl.Call("echo_data_bytes", params, result); err != nil { - return []byte{}, err - } - return result.Value, nil -} - -func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { - t.Helper() - - val := testVal - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) - - val2 := randBytes(t) - got2, err := echoBytesViaHTTP(cl, val2) - require.Nil(t, err) - assert.Equal(t, got2, val2) - - val3 := randBytes(t) - got3, err := echoDataBytesViaHTTP(cl, val3) - require.Nil(t, 
err) - assert.Equal(t, got3, val3) - - val4 := random.RandIntn(10000) - got4, err := echoIntViaHTTP(cl, val4) - require.Nil(t, err) - assert.Equal(t, got4, val4) -} - -func echoViaWS(cl *client.WSClient, val string) (string, error) { - params := map[string]interface{}{ - "arg": val, - } - err := cl.Call(context.Background(), "echo", params) - if err != nil { - return "", err - } - - msg := <-cl.ResponsesCh - if msg.Error != nil { - return "", err - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - if err != nil { - return "", nil - } - return result.Value, nil -} - -func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { - params := map[string]interface{}{ - "arg": bytes, - } - err := cl.Call(context.Background(), "echo_bytes", params) - if err != nil { - return []byte{}, err - } - - msg := <-cl.ResponsesCh - if msg.Error != nil { - return []byte{}, msg.Error - } - result := new(ResultEchoBytes) - err = json.Unmarshal(msg.Result, result) - if err != nil { - return []byte{}, nil - } - return result.Value, nil -} - -func testWithWSClient(t *testing.T, cl *client.WSClient) { - t.Helper() - - val := testVal - got, err := echoViaWS(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) - - val2 := randBytes(t) - got2, err := echoBytesViaWS(cl, val2) - require.Nil(t, err) - assert.Equal(t, got2, val2) -} - -// ------------- - -func TestServersAndClientsBasic(t *testing.T) { - t.Parallel() - - serverAddrs := [...]string{tcpAddr, unixAddr} - for _, addr := range serverAddrs { - cl1 := client.NewURIClient(addr) - fmt.Printf("=== testing server on %s using URI client", addr) - testWithHTTPClient(t, cl1) - - cl2 := client.NewJSONRPCClient(addr) - fmt.Printf("=== testing server on %s using JSONRPC client", addr) - testWithHTTPClient(t, cl2) - - cl3 := client.NewWSClient(addr, websocketEndpoint) - cl3.SetLogger(log.NewTestingLogger(t)) - err := cl3.Start() - require.Nil(t, err) - fmt.Printf("=== testing server on %s using WS client", addr) - testWithWSClient(t, cl3) - cl3.Stop() - } - - cl1 := client.NewURIClient(tcpServerUnavailableAddr) - _, err := cl1.Call("error", nil, nil) - require.EqualError(t, err, "server at 'http://0.0.0.0:47769' returned 418 I'm a teapot") - - cl2 := client.NewJSONRPCClient(tcpServerUnavailableAddr) - _, err = cl2.Call("error", nil, nil) - require.EqualError(t, err, "server at 'http://0.0.0.0:47769' returned 418 I'm a teapot") -} - -func TestHexStringArg(t *testing.T) { - t.Parallel() - - cl := client.NewURIClient(tcpAddr) - // should NOT be handled as hex - val := "0xabc" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} - -func TestQuotedStringArg(t *testing.T) { - t.Parallel() - - cl := client.NewURIClient(tcpAddr) - // should NOT be unquoted - val := "\"abc\"" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} - -func TestWSNewWSRPCFunc(t *testing.T) { - t.Parallel() - - cl := client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.NewTestingLogger(t)) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - val := testVal - params := map[string]interface{}{ - "arg": val, - } - err = cl.Call(context.Background(), "echo_ws", params) - require.Nil(t, err) - - msg := <-cl.ResponsesCh - if msg.Error != nil { - t.Fatal(err) - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) -} - -func TestWSHandlesArrayParams(t *testing.T) { - t.Parallel() - - cl 
:= client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.NewTestingLogger(t)) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - val := testVal - params := []interface{}{val} - err = cl.CallWithArrayParams(context.Background(), "echo_ws", params) - require.Nil(t, err) - - msg := <-cl.ResponsesCh - if msg.Error != nil { - t.Fatalf("%+v", err) - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) -} - -// TestWSClientPingPong checks that a client & server exchange pings -// & pongs so connection stays alive. -func TestWSClientPingPong(t *testing.T) { - t.Parallel() - - cl := client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.NewTestingLogger(t)) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - time.Sleep(6 * time.Second) -} - -func randBytes(t *testing.T) []byte { - t.Helper() - - n := random.RandIntn(10) + 2 - buf := make([]byte, n) - _, err := crand.Read(buf) - require.Nil(t, err) - return bytes.Replace(buf, []byte("="), []byte{100}, -1) -} diff --git a/tm2/pkg/bft/rpc/lib/server/handlers.go b/tm2/pkg/bft/rpc/lib/server/handlers.go index 1957d9a9fc0..417f417ba26 100644 --- a/tm2/pkg/bft/rpc/lib/server/handlers.go +++ b/tm2/pkg/bft/rpc/lib/server/handlers.go @@ -117,8 +117,8 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger *slog.Logger) http.H // first try to unmarshal the incoming request as an array of RPC requests var ( - requests []types.RPCRequest - responses []types.RPCResponse + requests types.RPCRequests + responses types.RPCResponses ) if err := json.Unmarshal(b, &requests); err != nil { // next, try to unmarshal as a single request @@ -438,7 +438,7 @@ type wsConnection struct { remoteAddr string baseConn *websocket.Conn - writeChan chan types.RPCResponse + writeChan chan types.RPCResponses funcMap map[string]*RPCFunc @@ -543,7 +543,7 @@ func ReadLimit(readLimit int64) func(*wsConnection) { // OnStart implements service.Service by starting the read and write routines. It // blocks until the connection closes. func (wsc *wsConnection) OnStart() error { - wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) + wsc.writeChan = make(chan types.RPCResponses, wsc.writeChanCapacity) // Read subscriptions/unsubscriptions to events go wsc.readRoutine() @@ -556,7 +556,7 @@ func (wsc *wsConnection) OnStart() error { // OnStop implements service.Service by unsubscribing remoteAddr from all subscriptions. func (wsc *wsConnection) OnStop() { // Both read and write loops close the websocket connection when they exit their loops. - // The writeChan is never closed, to allow WriteRPCResponse() to fail. + // The writeChan is never closed, to allow WriteRPCResponses() to fail. if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) @@ -575,7 +575,7 @@ func (wsc *wsConnection) GetRemoteAddr() string { // WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. // It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { +func (wsc *wsConnection) WriteRPCResponses(resp types.RPCResponses) { select { case <-wsc.Quit(): return @@ -585,7 +585,7 @@ func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { // TryWriteRPCResponse attempts to push a response to the writeChan, but does not block. // It implements WSRPCConnection. 
It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponses(resp types.RPCResponses) bool { select { case <-wsc.Quit(): return false @@ -615,7 +615,7 @@ func (wsc *wsConnection) readRoutine() { err = fmt.Errorf("WSJSONRPC: %v", r) } wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - wsc.WriteRPCResponse(types.RPCInternalError(types.JSONRPCStringID("unknown"), err)) + wsc.WriteRPCResponses(types.RPCResponses{types.RPCInternalError(types.JSONRPCStringID("unknown"), err)}) go wsc.readRoutine() } else { wsc.baseConn.Close() //nolint: errcheck @@ -647,50 +647,81 @@ func (wsc *wsConnection) readRoutine() { return } - var request types.RPCRequest - err = json.Unmarshal(in, &request) - if err != nil { - wsc.WriteRPCResponse(types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshalling request"))) - continue - } + // first try to unmarshal the incoming request as an array of RPC requests + var ( + requests types.RPCRequests + responses types.RPCResponses + ) + + // Try to unmarshal the requests as a batch + if err := json.Unmarshal(in, &requests); err != nil { + // Next, try to unmarshal as a single request + var request types.RPCRequest + if err := json.Unmarshal(in, &request); err != nil { + wsc.WriteRPCResponses( + types.RPCResponses{ + types.RPCParseError( + types.JSONRPCStringID(""), + errors.Wrap(err, "error unmarshalling request"), + ), + }, + ) + + return + } - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == types.JSONRPCStringID("") { - wsc.Logger.Debug("WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") - continue + requests = []types.RPCRequest{request} } - // Now, fetch the RPCFunc and execute it. - rpcFunc := wsc.funcMap[request.Method] - if rpcFunc == nil { - wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID)) - continue - } + for _, request := range requests { + request := request + + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == types.JSONRPCStringID("") { + wsc.Logger.Debug("Skipping notification JSON-RPC request") - ctx := &types.Context{JSONReq: &request, WSConn: wsc} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) continue } - args = append(args, fnArgs...) - } - returns := rpcFunc.f.Call(args) + // Now, fetch the RPCFunc and execute it. 
+ rpcFunc := wsc.funcMap[request.Method] + if rpcFunc == nil { + responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + + continue + } - // TODO: Need to encode args/returns to string if we want to log them - wsc.Logger.Info("WSJSONRPC", "method", request.Method) + ctx := &types.Context{JSONReq: &request, WSConn: wsc} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) + if err != nil { + responses = append(responses, types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) - result, err := unreflectResult(returns) - if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err)) - continue - } + continue + } + args = append(args, fnArgs...) + } + + returns := rpcFunc.f.Call(args) + + // TODO: Need to encode args/returns to string if we want to log them + wsc.Logger.Info("WSJSONRPC", "method", request.Method) + + result, err := unreflectResult(returns) + if err != nil { + responses = append(responses, types.RPCInternalError(request.ID, err)) + + continue + } + + responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) - wsc.WriteRPCResponse(types.NewRPCSuccessResponse(request.ID, result)) + if len(responses) > 0 { + wsc.WriteRPCResponses(responses) + } + } } } } @@ -729,8 +760,16 @@ func (wsc *wsConnection) writeRoutine() { wsc.Stop() return } - case msg := <-wsc.writeChan: - jsonBytes, err := json.MarshalIndent(msg, "", " ") + case msgs := <-wsc.writeChan: + var writeData any + + if len(msgs) == 1 { + writeData = msgs[0] + } else { + writeData = msgs + } + + jsonBytes, err := json.MarshalIndent(writeData, "", " ") if err != nil { wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) } else if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { diff --git a/tm2/pkg/bft/rpc/lib/server/handlers_test.go b/tm2/pkg/bft/rpc/lib/server/handlers_test.go index 75c64151619..f6572be7e0a 100644 --- a/tm2/pkg/bft/rpc/lib/server/handlers_test.go +++ b/tm2/pkg/bft/rpc/lib/server/handlers_test.go @@ -194,7 +194,7 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } - var responses []types.RPCResponse + var responses types.RPCResponses // try to unmarshal an array first err = json.Unmarshal(blob, &responses) if err != nil { @@ -211,7 +211,7 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } // have a single-element result - responses = []types.RPCResponse{response} + responses = types.RPCResponses{response} } } if tt.expectCount != len(responses) { diff --git a/tm2/pkg/bft/rpc/lib/server/http_server.go b/tm2/pkg/bft/rpc/lib/server/http_server.go index 23ac851512f..a4e535160b5 100644 --- a/tm2/pkg/bft/rpc/lib/server/http_server.go +++ b/tm2/pkg/bft/rpc/lib/server/http_server.go @@ -118,7 +118,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { // WriteRPCResponseArrayHTTP will do the same as WriteRPCResponseHTTP, except it // can write arrays of responses for batched request/response interactions via // the JSON RPC. 
-func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res []types.RPCResponse) { +func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res types.RPCResponses) { if len(res) == 1 { WriteRPCResponseHTTP(w, res[0]) } else { diff --git a/tm2/pkg/bft/rpc/lib/test/data.json b/tm2/pkg/bft/rpc/lib/test/data.json deleted file mode 100644 index 83283ec33fb..00000000000 --- a/tm2/pkg/bft/rpc/lib/test/data.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "jsonrpc": "2.0", - "id": "", - "method": "hello_world", - "params": { - "name": "my_world", - "num": 5 - } -} diff --git a/tm2/pkg/bft/rpc/lib/test/integration_test.sh b/tm2/pkg/bft/rpc/lib/test/integration_test.sh deleted file mode 100755 index 7c23be7d3b9..00000000000 --- a/tm2/pkg/bft/rpc/lib/test/integration_test.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -# Change into that dir because we expect that. -pushd "$DIR" - -echo "==> Building the server" -go build -o rpcserver main.go - -echo "==> (Re)starting the server" -PID=$(pgrep rpcserver || echo "") -if [[ $PID != "" ]]; then - kill -9 "$PID" -fi -./rpcserver & -PID=$! -sleep 2 - -echo "==> simple request" -R1=$(curl -s 'http://localhost:8008/hello_world?name="my_world"&num=5') -R2=$(curl -s --data @data.json http://localhost:8008) -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with 0x-prefixed hex string arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name=0x41424344&num=123') -R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi ABCD 123"},"error":""}' -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with missing params" -R1=$(curl -s 'http://localhost:8008/hello_world') -R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi 0"},"error":""}' -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with unquoted string arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name=abcd&num=123') -R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: invalid character 'a' looking for beginning of value\"}" -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with string type when expecting number arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name="abcd"&num=0xabcd') -R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: Got a hex string arg, but expected 'int'\"}" -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> Stopping the server" -kill -9 $PID - -rm -f rpcserver - -popd -exit 0 diff --git a/tm2/pkg/bft/rpc/lib/test/main.go b/tm2/pkg/bft/rpc/lib/test/main.go deleted file mode 100644 index 3fd8ea0bf61..00000000000 --- a/tm2/pkg/bft/rpc/lib/test/main.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - - "github.com/gnolang/gno/tm2/pkg/log" - osm "github.com/gnolang/gno/tm2/pkg/os" - - 
rpcserver "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -var routes = map[string]*rpcserver.RPCFunc{ - "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), -} - -func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { - return Result{fmt.Sprintf("hi %s %d", name, num)}, nil -} - -type Result struct { - Result string -} - -func main() { - var ( - mux = http.NewServeMux() - logger = log.NewNoopLogger() - ) - - // Stop upon receiving SIGTERM or CTRL-C. - osm.TrapSignal(func() {}) - - rpcserver.RegisterRPCFuncs(mux, routes, logger) - config := rpcserver.DefaultConfig() - listener, err := rpcserver.Listen("0.0.0.0:8008", config) - if err != nil { - osm.Exit(err.Error()) - } - rpcserver.StartHTTPServer(listener, mux, logger, config) -} diff --git a/tm2/pkg/bft/rpc/lib/types/types.go b/tm2/pkg/bft/rpc/lib/types/types.go index 65cafd79fe5..e1d165e6e54 100644 --- a/tm2/pkg/bft/rpc/lib/types/types.go +++ b/tm2/pkg/bft/rpc/lib/types/types.go @@ -6,30 +6,34 @@ import ( "fmt" "net/http" "reflect" - "strings" "github.com/gnolang/gno/tm2/pkg/amino" "github.com/gnolang/gno/tm2/pkg/errors" ) -// a wrapper to emulate a sum type: jsonrpcid = string | int -// TODO: refactor when Go 2.0 arrives https://github.com/golang/go/issues/19412 -type jsonrpcid interface { - isJSONRPCID() +// JSONRPCID is a wrapper type for JSON-RPC request IDs, +// which can be a string value | number value | not set (nil) +type JSONRPCID interface { + String() string } // JSONRPCStringID a wrapper for JSON-RPC string IDs type JSONRPCStringID string -func (JSONRPCStringID) isJSONRPCID() {} +func (id JSONRPCStringID) String() string { + return string(id) +} // JSONRPCIntID a wrapper for JSON-RPC integer IDs type JSONRPCIntID int -func (JSONRPCIntID) isJSONRPCID() {} +func (id JSONRPCIntID) String() string { + return fmt.Sprintf("%d", id) +} -func idFromInterface(idInterface interface{}) (jsonrpcid, error) { - switch id := idInterface.(type) { +// parseID parses the given ID value +func parseID(idValue any) (JSONRPCID, error) { + switch id := idValue.(type) { case string: return JSONRPCStringID(id), nil case float64: @@ -49,38 +53,45 @@ func idFromInterface(idInterface interface{}) (jsonrpcid, error) { type RPCRequest struct { JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id"` + ID JSONRPCID `json:"id"` Method string `json:"method"` Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} } -// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +// UnmarshalJSON custom JSON unmarshalling due to JSONRPCID being string or int func (request *RPCRequest) UnmarshalJSON(data []byte) error { unsafeReq := &struct { JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id"` + ID any `json:"id"` Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + Params json.RawMessage `json:"params"` // must be map[string]any or []any }{} - err := json.Unmarshal(data, &unsafeReq) - if err != nil { - return err + + if err := json.Unmarshal(data, &unsafeReq); err != nil { + return fmt.Errorf("unable to JSON-parse the RPC request, %w", err) } + request.JSONRPC = unsafeReq.JSONRPC request.Method = unsafeReq.Method request.Params = unsafeReq.Params + + // Check if the ID is set if unsafeReq.ID == nil { return nil } - id, err := idFromInterface(unsafeReq.ID) + + // Parse the ID + id, err := parseID(unsafeReq.ID) if err != nil { 
- return err + return fmt.Errorf("unable to parse request ID, %w", err) } + request.ID = id + return nil } -func NewRPCRequest(id jsonrpcid, method string, params json.RawMessage) RPCRequest { +func NewRPCRequest(id JSONRPCID, method string, params json.RawMessage) RPCRequest { return RPCRequest{ JSONRPC: "2.0", ID: id, @@ -93,38 +104,25 @@ func (request RPCRequest) String() string { return fmt.Sprintf("[%s %s]", request.ID, request.Method) } -func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { +// MapToRequest generates an RPC request with the given ID and method. +// The params are encoded as a JSON map +func MapToRequest(id JSONRPCID, method string, params map[string]any) (RPCRequest, error) { params_ := make(map[string]json.RawMessage, len(params)) for name, value := range params { valueJSON, err := amino.MarshalJSON(value) if err != nil { - return RPCRequest{}, err + return RPCRequest{}, fmt.Errorf("unable to parse param, %w", err) } + params_[name] = valueJSON } - payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. - if err != nil { - return RPCRequest{}, err - } - request := NewRPCRequest(id, method, payload) - return request, nil -} -func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { - params_ := make([]json.RawMessage, len(params)) - for i, value := range params { - valueJSON, err := amino.MarshalJSON(value) - if err != nil { - return RPCRequest{}, err - } - params_[i] = valueJSON - } payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. if err != nil { - return RPCRequest{}, err + return RPCRequest{}, fmt.Errorf("unable to JSON marshal params, %w", err) } - request := NewRPCRequest(id, method, payload) - return request, nil + + return NewRPCRequest(id, method, payload), nil } // ---------------------------------------- @@ -137,21 +135,27 @@ type RPCError struct { } func (err RPCError) Error() string { - const baseFormat = "RPC error %v - %s" + const baseFormat = "RPC error %d - %s" if err.Data != "" { return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data) } + return fmt.Sprintf(baseFormat, err.Code, err.Message) } type RPCResponse struct { JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id"` + ID JSONRPCID `json:"id"` Result json.RawMessage `json:"result,omitempty"` Error *RPCError `json:"error,omitempty"` } -// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +type ( + RPCRequests []RPCRequest + RPCResponses []RPCResponse +) + +// UnmarshalJSON custom JSON unmarshalling due to JSONRPCID being string or int func (response *RPCResponse) UnmarshalJSON(data []byte) error { unsafeResp := &struct { JSONRPC string `json:"jsonrpc"` @@ -159,25 +163,33 @@ func (response *RPCResponse) UnmarshalJSON(data []byte) error { Result json.RawMessage `json:"result,omitempty"` Error *RPCError `json:"error,omitempty"` }{} - err := json.Unmarshal(data, &unsafeResp) - if err != nil { - return err + + // Parse the response + if err := json.Unmarshal(data, &unsafeResp); err != nil { + return fmt.Errorf("unable to JSON-parse the RPC response, %w", err) } + response.JSONRPC = unsafeResp.JSONRPC response.Error = unsafeResp.Error response.Result = unsafeResp.Result + + // Check if any response ID is set if unsafeResp.ID == nil { return nil } - id, err := idFromInterface(unsafeResp.ID) + + // Parse the ID + id, err := parseID(unsafeResp.ID) if err != nil { - return err + return fmt.Errorf("unable to parse response ID, %w", 
err) } + response.ID = id + return nil } -func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { +func NewRPCSuccessResponse(id JSONRPCID, res any) RPCResponse { var rawMsg json.RawMessage if res != nil { @@ -186,13 +198,13 @@ func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { if err != nil { return RPCInternalError(id, errors.Wrap(err, "Error marshalling response")) } - rawMsg = json.RawMessage(js) + rawMsg = js } return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} } -func NewRPCErrorResponse(id jsonrpcid, code int, msg string, data string) RPCResponse { +func NewRPCErrorResponse(id JSONRPCID, code int, msg string, data string) RPCResponse { return RPCResponse{ JSONRPC: "2.0", ID: id, @@ -207,40 +219,36 @@ func (response RPCResponse) String() string { return fmt.Sprintf("[%s %s]", response.ID, response.Error) } -func RPCParseError(id jsonrpcid, err error) RPCResponse { +func RPCParseError(id JSONRPCID, err error) RPCResponse { return NewRPCErrorResponse(id, -32700, "Parse error. Invalid JSON", err.Error()) } -func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { +func RPCInvalidRequestError(id JSONRPCID, err error) RPCResponse { return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) } -func RPCMethodNotFoundError(id jsonrpcid) RPCResponse { +func RPCMethodNotFoundError(id JSONRPCID) RPCResponse { return NewRPCErrorResponse(id, -32601, "Method not found", "") } -func RPCInvalidParamsError(id jsonrpcid, err error) RPCResponse { +func RPCInvalidParamsError(id JSONRPCID, err error) RPCResponse { return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) } -func RPCInternalError(id jsonrpcid, err error) RPCResponse { +func RPCInternalError(id JSONRPCID, err error) RPCResponse { return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) } -func RPCServerError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) -} - // ---------------------------------------- // WSRPCConnection represents a websocket connection. type WSRPCConnection interface { // GetRemoteAddr returns a remote address of the connection. GetRemoteAddr() string - // WriteRPCResponse writes the resp onto connection (BLOCKING). - WriteRPCResponse(resp RPCResponse) - // TryWriteRPCResponse tries to write the resp onto connection (NON-BLOCKING). - TryWriteRPCResponse(resp RPCResponse) bool + // WriteRPCResponses writes the resp onto connection (BLOCKING). + WriteRPCResponses(resp RPCResponses) + // TryWriteRPCResponses tries to write the resp onto connection (NON-BLOCKING). + TryWriteRPCResponses(resp RPCResponses) bool // Context returns the connection's context. Context() context.Context } @@ -296,17 +304,3 @@ func (ctx *Context) Context() context.Context { } return context.Background() } - -// ---------------------------------------- -// SOCKETS - -// Determine if its a unix or tcp socket. 
-// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port -// TODO: deprecate -func SocketType(listenAddr string) string { - socketType := "unix" - if len(strings.Split(listenAddr, ":")) >= 2 { - socketType = "tcp" - } - return socketType -} diff --git a/tm2/pkg/bft/rpc/lib/types/types_test.go b/tm2/pkg/bft/rpc/lib/types/types_test.go index 55ee8ed3945..ff50c1b6c15 100644 --- a/tm2/pkg/bft/rpc/lib/types/types_test.go +++ b/tm2/pkg/bft/rpc/lib/types/types_test.go @@ -6,82 +6,133 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/gnolang/gno/tm2/pkg/errors" ) -type SampleResult struct { - Value string -} +func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { + t.Parallel() -type responseTest struct { - id jsonrpcid - expected string -} + testTable := []struct { + name string + id JSONRPCID + expectedID string + }{ + { + "short string", + JSONRPCStringID("1"), + `"1"`, + }, + { + "long string", + JSONRPCStringID("alphabet"), + `"alphabet"`, + }, + { + "empty string", + JSONRPCStringID(""), + `""`, + }, + { + "unicode string", + JSONRPCStringID("àáâ"), + `"àáâ"`, + }, + { + "negative number", + JSONRPCIntID(-1), + "-1", + }, + { + "zero ID", + JSONRPCIntID(0), + "0", + }, + { + "non-zero ID", + JSONRPCIntID(100), + "100", + }, + } -var responseTests = []responseTest{ - {JSONRPCStringID("1"), `"1"`}, - {JSONRPCStringID("alphabet"), `"alphabet"`}, - {JSONRPCStringID(""), `""`}, - {JSONRPCStringID("àáâ"), `"àáâ"`}, - {JSONRPCIntID(-1), "-1"}, - {JSONRPCIntID(0), "0"}, - {JSONRPCIntID(1), "1"}, - {JSONRPCIntID(100), "100"}, -} + for _, testCase := range testTable { + testCase := testCase -func TestResponses(t *testing.T) { - t.Parallel() + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() - assert := assert.New(t) - for _, tt := range responseTests { - jsonid := tt.id - a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"}) - b, _ := json.Marshal(a) - s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) - assert.Equal(s, string(b)) - - d := RPCParseError(jsonid, errors.New("Hello world")) - e, _ := json.Marshal(d) - f := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. 
Invalid JSON","data":"Hello world"}}`, tt.expected) - assert.Equal(f, string(e)) - - g := RPCMethodNotFoundError(jsonid) - h, _ := json.Marshal(g) - i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) - assert.Equal(string(h), i) - } -} + t.Run("marshal", func(t *testing.T) { + t.Parallel() -func TestUnmarshallResponses(t *testing.T) { - t.Parallel() + data, err := json.Marshal( + NewRPCSuccessResponse(testCase.id, struct { + Value string + }{ + Value: "hello", + }, + ), + ) + require.NoError(t, err) - assert := assert.New(t) - for _, tt := range responseTests { - response := &RPCResponse{} - err := json.Unmarshal([]byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), response) - assert.Nil(err) - a := NewRPCSuccessResponse(tt.id, &SampleResult{"hello"}) - assert.Equal(*response, a) - } - response := &RPCResponse{} - err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}`), response) - assert.NotNil(err) -} + assert.Equal( + t, + fmt.Sprintf( + `{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, + testCase.expectedID, + ), + string(data), + ) -func TestRPCError(t *testing.T) { - t.Parallel() + data, err = json.Marshal(RPCParseError(testCase.id, errors.New("Hello world"))) + require.NoError(t, err) + + assert.Equal( + t, + fmt.Sprintf( + `{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}`, + testCase.expectedID, + ), + string(data), + ) + + data, err = json.Marshal(RPCMethodNotFoundError(testCase.id)) + require.NoError(t, err) + + assert.Equal( + t, + fmt.Sprintf( + `{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, + testCase.expectedID, + ), + string(data), + ) + }) - assert.Equal(t, "RPC error 12 - Badness: One worse than a code 11", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - Data: "One worse than a code 11", - })) - - assert.Equal(t, "RPC error 12 - Badness", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - })) + t.Run("unmarshal", func(t *testing.T) { + t.Parallel() + + var expectedResponse RPCResponse + + assert.NoError( + t, + json.Unmarshal( + []byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, testCase.expectedID)), + &expectedResponse, + ), + ) + + successResponse := NewRPCSuccessResponse( + testCase.id, + struct { + Value string + }{ + Value: "hello", + }, + ) + + assert.Equal(t, expectedResponse, successResponse) + }) + }) + } } diff --git a/tm2/pkg/bft/rpc/test/helpers.go b/tm2/pkg/bft/rpc/test/helpers.go deleted file mode 100644 index d934cf27a64..00000000000 --- a/tm2/pkg/bft/rpc/test/helpers.go +++ /dev/null @@ -1,148 +0,0 @@ -package rpctest - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - cfg "github.com/gnolang/gno/tm2/pkg/bft/config" - nm "github.com/gnolang/gno/tm2/pkg/bft/node" - "github.com/gnolang/gno/tm2/pkg/bft/privval" - "github.com/gnolang/gno/tm2/pkg/bft/proxy" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" - "github.com/gnolang/gno/tm2/pkg/log" - "github.com/gnolang/gno/tm2/pkg/p2p" -) - -// Options helps with specifying some parameters for our RPC testing for greater -// control. 
-type Options struct { - suppressStdout bool - recreateConfig bool - genesisPath string -} - -var ( - // This entire testing insanity is removed in: - // https://github.com/gnolang/gno/pull/1498 - globalConfig *cfg.Config - globalGenesis string - - defaultOptions = Options{ - recreateConfig: false, - genesisPath: "genesis.json", - } -) - -func waitForRPC() { - cfg, _ := GetConfig() - laddr := cfg.RPC.ListenAddress - client := rpcclient.NewJSONRPCClient(laddr) - result := new(ctypes.ResultStatus) - for { - _, err := client.Call("status", map[string]interface{}{}, result) - if err == nil { - return - } else { - fmt.Println("error", err) - time.Sleep(time.Millisecond) - } - } -} - -// f**ing long, but unique for each test -func makePathname() string { - // get path - p, err := os.Getwd() - if err != nil { - panic(err) - } - // fmt.Println(p) - sep := string(filepath.Separator) - return strings.Replace(p, sep, "_", -1) -} - -func createConfig() (*cfg.Config, string) { - pathname := makePathname() - c, genesisFile := cfg.ResetTestRoot(pathname) - - // and we use random ports to run in parallel - c.P2P.ListenAddress = "tcp://127.0.0.1:0" - c.RPC.ListenAddress = "tcp://127.0.0.1:0" - c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - // c.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application - return c, genesisFile -} - -// GetConfig returns a config for the test cases as a singleton -func GetConfig(forceCreate ...bool) (*cfg.Config, string) { - if globalConfig == nil || globalGenesis == "" || (len(forceCreate) > 0 && forceCreate[0]) { - globalConfig, globalGenesis = createConfig() - } - return globalConfig, globalGenesis -} - -// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized -func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { - nodeOpts := defaultOptions - for _, opt := range opts { - opt(&nodeOpts) - } - node := newTendermint(app, &nodeOpts) - err := node.Start() - if err != nil { - panic(err) - } - - // wait for rpc - waitForRPC() - - return node -} - -// StopTendermint stops a test tendermint server, waits until it's stopped and -// cleans up test/config files. -func StopTendermint(node *nm.Node) { - node.Stop() - node.Wait() - os.RemoveAll(node.Config().RootDir) -} - -// newTendermint creates a new tendermint server and sleeps forever -func newTendermint(app abci.Application, opts *Options) *nm.Node { - // Create & start node - config, genesisFile := GetConfig(opts.recreateConfig) - - pvKeyFile := config.PrivValidatorKeyFile() - pvKeyStateFile := config.PrivValidatorStateFile() - pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) - papp := proxy.NewLocalClientCreator(app) - nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) - if err != nil { - panic(err) - } - node, err := nm.NewNode(config, pv, nodeKey, papp, - nm.DefaultGenesisDocProviderFunc(genesisFile), - nm.DefaultDBProvider, - log.NewNoopLogger()) - if err != nil { - panic(err) - } - return node -} - -// SuppressStdout is an option that tries to make sure the RPC test Tendermint -// node doesn't log anything to stdout. -func SuppressStdout(o *Options) { - o.suppressStdout = true -} - -// RecreateConfig instructs the RPC test to recreate the configuration each -// time, instead of treating it as a global singleton. 
-func RecreateConfig(o *Options) { - o.recreateConfig = true -} diff --git a/tm2/pkg/crypto/keys/client/broadcast.go b/tm2/pkg/crypto/keys/client/broadcast.go index 3eafc88109a..423714b2141 100644 --- a/tm2/pkg/crypto/keys/client/broadcast.go +++ b/tm2/pkg/crypto/keys/client/broadcast.go @@ -100,7 +100,10 @@ func BroadcastHandler(cfg *BroadcastCfg) (*ctypes.ResultBroadcastTxCommit, error return nil, errors.Wrap(err, "remarshaling tx binary bytes") } - cli := client.NewHTTP(remote, "/websocket") + cli, err := client.NewHTTPClient(remote) + if err != nil { + return nil, err + } if cfg.DryRun { return SimulateTx(cli, bz) diff --git a/tm2/pkg/crypto/keys/client/query.go b/tm2/pkg/crypto/keys/client/query.go index a9a6764c773..e44bb796b9d 100644 --- a/tm2/pkg/crypto/keys/client/query.go +++ b/tm2/pkg/crypto/keys/client/query.go @@ -100,7 +100,11 @@ func QueryHandler(cfg *QueryCfg) (*ctypes.ResultABCIQuery, error) { // Height: height, XXX // Prove: false, XXX } - cli := client.NewHTTP(remote, "/websocket") + cli, err := client.NewHTTPClient(remote) + if err != nil { + return nil, errors.Wrap(err, "new http client") + } + qres, err := cli.ABCIQueryWithOptions( cfg.Path, data, opts2) if err != nil {
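
The websocket-handler and types changes in this patch make batching the default shape for RPC responses: a lone response is still written to the wire as a single JSON object, while a batch is written as a JSON array. The short sketch below illustrates that marshaling rule only; the `marshalResponses` helper and the standalone program are illustrative and not part of the change set, and the `lib/types` import path is the one used by the test files touched in this patch.

package main

import (
	"encoding/json"
	"fmt"

	types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types"
)

// marshalResponses mirrors the writeRoutine rule introduced in handlers.go:
// a single response is unwrapped into one JSON object, a batch stays an array.
func marshalResponses(msgs types.RPCResponses) ([]byte, error) {
	var writeData any

	if len(msgs) == 1 {
		writeData = msgs[0]
	} else {
		writeData = msgs
	}

	return json.MarshalIndent(writeData, "", "  ")
}

func main() {
	// One successful response: written as a single object.
	single := types.RPCResponses{
		types.NewRPCSuccessResponse(types.JSONRPCIntID(1), struct{ Value string }{Value: "hello"}),
	}

	// A batch mixing a success and an error: written as an array.
	batch := types.RPCResponses{
		types.NewRPCSuccessResponse(types.JSONRPCStringID("a"), struct{ Value string }{Value: "hello"}),
		types.RPCMethodNotFoundError(types.JSONRPCStringID("b")),
	}

	out, _ := marshalResponses(single)
	fmt.Println(string(out)) // a single JSON-RPC response object

	out, _ = marshalResponses(batch)
	fmt.Println(string(out)) // a JSON array of response objects
}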