Feature/#713 autoclaim l2deposits (#714)
* ClaimTxManager claims L2Bridges

* e2e wip + fixes

* e2e test

* gha autoclaiml2l2 e2e

* reduce code + linter

* order query by id + fix some logs

* Update claimcompressor smc to latest version

* update test balance due to the new claimcompressor deployment cost
ARR552 authored Dec 19, 2024
1 parent ef6ba36 commit 6489a6b
Showing 20 changed files with 270 additions and 108 deletions.
27 changes: 27 additions & 0 deletions .github/workflows/test-autoclaiml2l2.yml
@@ -0,0 +1,27 @@
name: Test
on:
  push:
    branches:
      - main
      - master
      - develop
      - update-external-dependencies
  pull_request:
jobs:
  test-autoclaiml2l2:
    strategy:
      matrix:
        go-version: [ 1.21.x ]
        goarch: [ "amd64" ]
    runs-on: ubuntu-latest
    steps:
    - name: Checkout code
      uses: actions/checkout@v2
    - name: Install Go
      uses: actions/setup-go@v1
      with:
        go-version: ${{ matrix.go-version }}
      env:
        GOARCH: ${{ matrix.goarch }}
    - name: Test
      run: make test-autoclaiml2l2
5 changes: 5 additions & 0 deletions Makefile
@@ -433,6 +433,11 @@ test-l2l2: build-docker stop run-multi-single-bridge ## Runs all tests checking
sleep 3
trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... -count 1 -tags='l2l2'

.PHONY: test-autoclaiml2l2
test-autoclaiml2l2: build-docker stop run-multi-single-bridge ## Runs all tests checking race conditions
sleep 3
trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... -count 1 -tags='autoclaiml2l2'

.PHONY: test-e2ecompress
test-e2ecompress: build-docker stop run-multi-single-bridge ## Runs all tests checking race conditions
sleep 3
2 changes: 1 addition & 1 deletion autoclaimservice/autoclaim/autoclaim.go
@@ -191,7 +191,7 @@ func (ac *autoclaim) claimGrouped() error {
log.Errorf("error compressing claim data, Error: %v", err)
return err
}
log.Debug("Sending compressed claim tx")
log.Debugf("Sending compressed claim tx with %d claims", len(allClaimData))
tx, err := ac.bm.SendCompressedClaims(compressedTxData)
if err != nil {
log.Errorf("error sending compressed claims, Error: %v", err)
1 change: 1 addition & 0 deletions autoclaimservice/blockchainmanager/manager.go
@@ -74,6 +74,7 @@ func NewClient(ctx context.Context, cfg *Config) (*Client, error) {
return nil, err
}
logger := log.WithFields("networkID", networkID)
logger.Debug("BlockchainManager client configured")

return &Client{
ctx: ctx,
124 changes: 79 additions & 45 deletions claimtxman/claimtxman.go
@@ -2,6 +2,7 @@ package claimtxman

import (
"context"
"errors"
"fmt"
"math/big"
"time"
@@ -11,6 +12,7 @@ import (
"github.com/0xPolygonHermez/zkevm-bridge-service/etherman"
"github.com/0xPolygonHermez/zkevm-bridge-service/log"
"github.com/0xPolygonHermez/zkevm-bridge-service/utils"
"github.com/0xPolygonHermez/zkevm-bridge-service/utils/gerror"
"github.com/0xPolygonHermez/zkevm-node/state/runtime"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -171,64 +173,96 @@ func (tm *ClaimTxManager) updateDepositsStatus(ger *etherman.GlobalExitRoot) err
}

func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbTx pgx.Tx) error {
var (
deposits []*etherman.Deposit
globalExitRoot = ger.GlobalExitRoot
err error
)
if ger.BlockID != 0 { // L2 exit root is updated
log.Infof("RollupID: %d, Rollup exitroot %v is updated", tm.rollupID, ger.ExitRoots[1])
if err := tm.storage.UpdateL2DepositsStatus(tm.ctx, ger.ExitRoots[1][:], tm.rollupID, tm.l2NetworkID, dbTx); err != nil {
err = tm.storage.UpdateL2DepositsStatus(tm.ctx, ger.ExitRoots[1][:], tm.rollupID, tm.l2NetworkID, dbTx)
if err != nil {
log.Errorf("rollupID: %d, error updating L2DepositsStatus. Error: %v", tm.rollupID, err)
return err
}
// If L2 claims processor is enabled
if tm.cfg.AreClaimsBetweenL2sEnabled {
log.Debugf("rollupID: %d, getting L2 deposits to autoClaim", tm.rollupID)
deposits, err = tm.storage.GetDepositsFromOtherL2ToClaim(tm.ctx, tm.l2NetworkID, dbTx)
if err != nil {
log.Errorf("rollupID: %d, error getting deposits from other L2 to claim. Error: %v", tm.rollupID, err)
return err
}
if len(deposits) > 0 {
globalExitRoot, err = tm.storage.GetLatestTrustedGERByDeposit(tm.ctx, deposits[0].DepositCount, deposits[0].NetworkID, deposits[0].DestinationNetwork, dbTx)
if errors.Is(err, gerror.ErrStorageNotFound) {
log.Infof("RollupID: %d, not fully synced yet. Retrying in 2s...")
time.Sleep(tm.cfg.RetryInterval.Duration)
globalExitRoot, err = tm.storage.GetLatestTrustedGERByDeposit(tm.ctx, deposits[0].DepositCount, deposits[0].NetworkID, deposits[0].DestinationNetwork, dbTx)
if errors.Is(err, gerror.ErrStorageNotFound) {
log.Infof("RollupID: %d, Still missing. Not fully synced yet. It will retry it later...")
} else if err != nil {
log.Errorf("rollupID: %d, error getting the latest trusted GER by deposit the second time. Error: %v", tm.rollupID, err)
return err
}
} else if err != nil {
log.Errorf("rollupID: %d, error getting the latest trusted GER by deposit. Error: %v", tm.rollupID, err)
return err
}
}
}
} else { // L1 exit root is updated in the trusted state
log.Infof("RollupID: %d, Mainnet exitroot %v is updated", tm.rollupID, ger.ExitRoots[0])
deposits, err := tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], tm.l2NetworkID, dbTx)
deposits, err = tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], tm.l2NetworkID, dbTx)
if err != nil {
log.Errorf("rollupID: %d, error getting and updating L1DepositsStatus. Error: %v", tm.rollupID, err)
return err
}
for _, deposit := range deposits {
if tm.l2NetworkID != deposit.DestinationNetwork {
log.Infof("Ignoring deposit id: %d deposit count:%d dest_net: %d, we are:%d", deposit.Id, deposit.DepositCount, deposit.DestinationNetwork, tm.l2NetworkID)
continue
}
}
for _, deposit := range deposits {
if tm.l2NetworkID != deposit.DestinationNetwork {
log.Infof("Ignoring deposit id: %d deposit count:%d dest_net: %d, we are:%d", deposit.Id, deposit.DepositCount, deposit.DestinationNetwork, tm.l2NetworkID)
continue
}

claimHash, err := tm.bridgeService.GetDepositStatus(tm.ctx, deposit.DepositCount, deposit.NetworkID, deposit.DestinationNetwork)
if err != nil {
log.Errorf("rollupID: %d, error getting deposit status for deposit id %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
if len(claimHash) > 0 || deposit.LeafType == LeafTypeMessage && !tm.isDepositMessageAllowed(deposit) {
log.Infof("RollupID: %d, Ignoring deposit Id: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", tm.rollupID, deposit.Id, deposit.LeafType, claimHash, deposit.OriginalAddress.String())
continue
}
claimHash, err := tm.bridgeService.GetDepositStatus(tm.ctx, deposit.DepositCount, deposit.NetworkID, deposit.DestinationNetwork)
if err != nil {
log.Errorf("rollupID: %d, error getting deposit status for deposit id %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
if len(claimHash) > 0 || deposit.LeafType == LeafTypeMessage && !tm.isDepositMessageAllowed(deposit) {
log.Infof("RollupID: %d, Ignoring deposit Id: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", tm.rollupID, deposit.Id, deposit.LeafType, claimHash, deposit.OriginalAddress.String())
continue
}

log.Infof("RollupID: %d, create the claim tx for the deposit count %d. Deposit Id: %d", tm.rollupID, deposit.DepositCount, deposit.Id)
ger, proof, rollupProof, err := tm.bridgeService.GetClaimProofForCompressed(ger.GlobalExitRoot, deposit.DepositCount, deposit.NetworkID, dbTx)
if err != nil {
log.Errorf("rollupID: %d, error getting Claim Proof for deposit Id %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
var (
mtProof [mtHeight][keyLen]byte
mtRollupProof [mtHeight][keyLen]byte
)
for i := 0; i < mtHeight; i++ {
mtProof[i] = proof[i]
mtRollupProof[i] = rollupProof[i]
}
tx, err := tm.l2Node.BuildSendClaim(tm.ctx, deposit, mtProof, mtRollupProof,
&etherman.GlobalExitRoot{
ExitRoots: []common.Hash{
ger.ExitRoots[0],
ger.ExitRoots[1],
}}, 1, 1, 1, tm.rollupID,
tm.auth)
if err != nil {
log.Errorf("rollupID: %d, error BuildSendClaim tx for deposit Id: %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
if err = tm.addClaimTx(deposit.Id, tm.auth.From, tx.To(), nil, tx.Data(), ger.GlobalExitRoot, dbTx); err != nil {
log.Errorf("rollupID: %d, error adding claim tx for deposit Id: %d Error: %v", tm.rollupID, deposit.Id, err)
return err
}
log.Infof("RollupID: %d, create the claim tx for the deposit count %d. Deposit Id: %d", tm.rollupID, deposit.DepositCount, deposit.Id)
ger, proof, rollupProof, err := tm.bridgeService.GetClaimProofForCompressed(globalExitRoot, deposit.DepositCount, deposit.NetworkID, dbTx)
if err != nil {
log.Errorf("rollupID: %d, error getting Claim Proof for deposit Id %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
var (
mtProof [mtHeight][keyLen]byte
mtRollupProof [mtHeight][keyLen]byte
)
for i := 0; i < mtHeight; i++ {
mtProof[i] = proof[i]
mtRollupProof[i] = rollupProof[i]
}
tx, err := tm.l2Node.BuildSendClaim(tm.ctx, deposit, mtProof, mtRollupProof,
&etherman.GlobalExitRoot{
ExitRoots: []common.Hash{
ger.ExitRoots[0],
ger.ExitRoots[1],
}}, 1, 1, 1,
tm.auth)
if err != nil {
log.Errorf("rollupID: %d, error BuildSendClaim tx for deposit Id: %d. Error: %v", tm.rollupID, deposit.Id, err)
return err
}
if err = tm.addClaimTx(deposit.Id, tm.auth.From, tx.To(), nil, tx.Data(), ger.GlobalExitRoot, dbTx); err != nil {
log.Errorf("rollupID: %d, error adding claim tx for deposit Id: %d Error: %v", tm.rollupID, deposit.Id, err)
return err
}
}
return nil
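Note: the single-retry GER lookup introduced above can be read as a standalone pattern. The sketch below is illustrative only, not part of this commit: the helper name is hypothetical, and it assumes it would live alongside the code above in package claimtxman, reusing that file's existing imports (context, errors, time, etherman, gerror, common, pgx) and the StorageInterface shown further down.

// getTrustedGERWithRetry is a hypothetical helper mirroring the single-retry
// lookup in processDepositStatus when the trusted GER for a deposit is not
// synced yet. If the GER is still missing after one retry, the error is
// returned and the caller may fall back to the previously known global exit root.
func getTrustedGERWithRetry(ctx context.Context, storage StorageInterface, deposit *etherman.Deposit,
	retryInterval time.Duration, dbTx pgx.Tx) (common.Hash, error) {
	ger, err := storage.GetLatestTrustedGERByDeposit(ctx, deposit.DepositCount, deposit.NetworkID, deposit.DestinationNetwork, dbTx)
	if errors.Is(err, gerror.ErrStorageNotFound) {
		// The synchronizer may simply be behind; wait once and try again.
		time.Sleep(retryInterval)
		ger, err = storage.GetLatestTrustedGERByDeposit(ctx, deposit.DepositCount, deposit.NetworkID, deposit.DestinationNetwork, dbTx)
	}
	return ger, err
}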
2 changes: 2 additions & 0 deletions claimtxman/config.go
@@ -20,6 +20,8 @@ type Config struct {
RetryNumber int `mapstructure:"RetryNumber"`
// AuthorizedClaimMessageAddresses are the addresses allowed to bridge messages with autoClaim
AuthorizedClaimMessageAddresses []common.Address `mapstructure:"AuthorizedClaimMessageAddresses"`
// AreClaimsBetweenL2sEnabled enables claiming bridges between L2s automatically
AreClaimsBetweenL2sEnabled bool `mapstructure:"AreClaimsBetweenL2sEnabled"`

// GroupingClaims is the configuration for grouping claims
GroupingClaims ConfigGroupingClaims `mapstructure:"GroupingClaims"`
2 changes: 2 additions & 0 deletions claimtxman/interfaces.go
@@ -14,6 +14,8 @@ type StorageInterface interface {
AddBlock(ctx context.Context, block *etherman.Block, dbTx pgx.Tx) (uint64, error)
UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error)
UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint32, dbTx pgx.Tx) error
GetDepositsFromOtherL2ToClaim(ctx context.Context, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error)
GetLatestTrustedGERByDeposit(ctx context.Context, depositCnt, networkID, destinationNetwork uint32, dbTx pgx.Tx) (common.Hash, error)
AddClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error
UpdateClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error
GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]types.MonitoredTx, error)
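The two storage methods added above are what the ClaimTxManager relies on for L2-to-L2 autoclaim. A minimal in-memory stand-in, useful as a test double, could look like the sketch below. Everything here is illustrative and not part of this commit: the type and file are hypothetical, only the two new methods are implemented (a real double would also need the rest of StorageInterface), and the pgx/v4 import path is an assumption based on the pgx.Tx usage elsewhere in this diff.

package claimtxman_test

import (
	"context"

	"github.com/0xPolygonHermez/zkevm-bridge-service/etherman"
	"github.com/0xPolygonHermez/zkevm-bridge-service/utils/gerror"
	"github.com/ethereum/go-ethereum/common"
	"github.com/jackc/pgx/v4"
)

// fakeStorage is a hypothetical in-memory double for the two new methods.
type fakeStorage struct {
	deposits []*etherman.Deposit    // deposits already marked ready_for_claim
	gers     map[uint32]common.Hash // deposit count -> latest trusted GER
}

func (f *fakeStorage) GetDepositsFromOtherL2ToClaim(_ context.Context, destinationNetwork uint32, _ pgx.Tx) ([]*etherman.Deposit, error) {
	var out []*etherman.Deposit
	for _, d := range f.deposits {
		// Mirror the SQL filter: deposits coming from another L2 (network_id != 0)
		// whose destination is destinationNetwork and that are ready to be claimed.
		if d.NetworkID != 0 && d.DestinationNetwork == destinationNetwork && d.ReadyForClaim {
			out = append(out, d)
		}
	}
	return out, nil
}

func (f *fakeStorage) GetLatestTrustedGERByDeposit(_ context.Context, depositCnt, _, _ uint32, _ pgx.Tx) (common.Hash, error) {
	ger, ok := f.gers[depositCnt]
	if !ok {
		return common.Hash{}, gerror.ErrStorageNotFound
	}
	return ger, nil
}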
1 change: 1 addition & 0 deletions config/config.debug.toml
@@ -18,6 +18,7 @@ PrivateKey = {Path = "../test/test.keystore.claimtx", Password = "testonly"}
RetryInterval = "1s"
RetryNumber = 10
AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
AreClaimsBetweenL2sEnabled = false
[ClaimTxManager.GroupingClaims]
Enabled = false
TriggerNumberOfClaims = 20
1 change: 1 addition & 0 deletions config/config.local.toml
@@ -18,6 +18,7 @@ PrivateKey = {Path = "/pk/keystore.claimtxmanager", Password = "testonly"}
RetryInterval = "1s"
RetryNumber = 10
AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
AreClaimsBetweenL2sEnabled = false
[ClaimTxManager.GroupingClaims]
Enabled = false
TriggerNumberOfClaims = 20
1 change: 1 addition & 0 deletions config/default.go
@@ -29,6 +29,7 @@ PrivateKey = {Path = "./test/test.keystore", Password = "testonly"}
RetryInterval = "1s"
RetryNumber = 10
AuthorizedClaimMessageAddresses = []
AreClaimsBetweenL2sEnabled = false
[ClaimTxManager.GroupingClaims]
Enabled = false
FrequencyToProcessCompressedClaims = "10m"
96 changes: 50 additions & 46 deletions db/pgstorage/pgstorage.go
@@ -550,22 +550,7 @@ func (p *PostgresStorage) GetDeposits(ctx context.Context, destAddr string, limi
return nil, err
}

deposits := make([]*etherman.Deposit, 0, len(rows.RawValues()))

for rows.Next() {
var (
deposit etherman.Deposit
amount string
)
err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim)
if err != nil {
return nil, err
}
deposit.Amount, _ = new(big.Int).SetString(amount, 10) //nolint:gomnd
deposits = append(deposits, &deposit)
}

return deposits, nil
return parseDeposits(rows, true)
}

// GetDepositCount gets the deposit count for the destination address.
@@ -588,32 +573,40 @@ func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot [
return nil, err
}

deposits := make([]*etherman.Deposit, 0, len(rows.RawValues()))
for rows.Next() {
var (
deposit etherman.Deposit
amount string
)
err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim)
if err != nil {
return nil, err
}
deposit.Amount, _ = new(big.Int).SetString(amount, 10) //nolint:gomnd
deposits = append(deposits, &deposit)
}
return deposits, nil
return parseDeposits(rows, false)
}

// UpdateL2DepositsStatus updates the ready_for_claim status of L2 deposits.
func (p *PostgresStorage) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint32, dbTx pgx.Tx) error {
const updateDepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true
const updateL2DepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true
WHERE deposit_cnt <=
(SELECT sync.deposit.deposit_cnt FROM mt.root INNER JOIN sync.deposit ON sync.deposit.id = mt.root.deposit_id WHERE mt.root.root = (select leaf from mt.rollup_exit where root = $1 and rollup_id = $2) AND mt.root.network = $3)
AND network_id = $3 AND ready_for_claim = false;`
_, err := p.getExecQuerier(dbTx).Exec(ctx, updateDepositsStatusSQL, exitRoot, rollupID, networkID)
_, err := p.getExecQuerier(dbTx).Exec(ctx, updateL2DepositsStatusSQL, exitRoot, rollupID, networkID)
return err
}

// GetDepositsFromOtherL2ToClaim returns L2 deposits whose destination is a specific L2
func (p *PostgresStorage) GetDepositsFromOtherL2ToClaim(ctx context.Context, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) {
const getL2DepositsToClaimStatusSQL = `select sync.deposit.id, sync.deposit.leaf_type, sync.deposit.orig_net, sync.deposit.orig_addr, sync.deposit.amount, sync.deposit.dest_net, sync.deposit.dest_addr, sync.deposit.deposit_cnt, sync.deposit.block_id, sync.deposit.network_id, sync.deposit.tx_hash, sync.deposit.metadata, sync.deposit.ready_for_claim FROM sync.deposit where sync.deposit.deposit_cnt not in (select index FROM sync.claim where sync.claim.network_id = $1) and sync.deposit.network_id !=0 and sync.deposit.dest_net = $1 and ready_for_claim =true order by sync.deposit.id desc;`
rows, err := p.getExecQuerier(dbTx).Query(ctx, getL2DepositsToClaimStatusSQL, destinationNetwork)
if err != nil {
return nil, err
}
return parseDeposits(rows, false)
}

// GetLatestTrustedGERByDeposit returns the latest trusted GER for a specific deposit
func (p *PostgresStorage) GetLatestTrustedGERByDeposit(ctx context.Context, depositCnt, networkID, destinationNetwork uint32, dbTx pgx.Tx) (common.Hash, error) {
const getLatestTrustedGERByDeposit = `SELECT sync.exit_root.global_exit_root FROM sync.deposit inner join mt.root on mt.root.deposit_id = sync.deposit.id inner join mt.rollup_exit on mt.rollup_exit.leaf = mt.root.root inner join sync.exit_root on sync.exit_root.exit_roots[2]= mt.rollup_exit.root where deposit_cnt = $1 and sync.deposit.network_id = $2 and dest_net = $3 and mt.rollup_exit.rollup_id = $2 and sync.exit_root.block_id = 0 and sync.exit_root.network_id = sync.deposit.dest_net order by sync.exit_root.id desc limit 1`
var ger common.Hash
err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestTrustedGERByDeposit, depositCnt, networkID, destinationNetwork).Scan(&ger)
if errors.Is(err, pgx.ErrNoRows) {
return common.Hash{}, gerror.ErrStorageNotFound
}
return ger, err
}

// AddClaimTx adds a claim monitored transaction to the storage.
func (p *PostgresStorage) AddClaimTx(ctx context.Context, mTx ctmtypes.MonitoredTx, dbTx pgx.Tx) error {
const addMonitoredTxSQL = `INSERT INTO sync.monitored_txs
@@ -694,21 +687,10 @@ func (p *PostgresStorage) GetPendingDepositsToClaim(ctx context.Context, destAdd
return nil, 0, err
}

deposits := make([]*etherman.Deposit, 0, len(rows.RawValues()))

for rows.Next() {
var (
deposit etherman.Deposit
amount string
)
err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim)
if err != nil {
return nil, 0, err
}
deposit.Amount, _ = new(big.Int).SetString(amount, 10) //nolint:gomnd
deposits = append(deposits, &deposit)
deposits, err := parseDeposits(rows, true)
if err != nil {
return nil, 0, err
}

return deposits, totalCount, nil
}

@@ -725,3 +707,25 @@ func (p *PostgresStorage) UpdateBlocksForTesting(ctx context.Context, networkID
_, err := p.getExecQuerier(dbTx).Exec(ctx, updateBlocksSQL, networkID, blockNum)
return err
}

func parseDeposits(rows pgx.Rows, needBlockNum bool) ([]*etherman.Deposit, error) {
deposits := make([]*etherman.Deposit, 0, len(rows.RawValues()))
for rows.Next() {
var (
deposit etherman.Deposit
amount string
err error
)
if needBlockNum {
err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim)
} else {
err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim)
}
if err != nil {
return nil, err
}
deposit.Amount, _ = new(big.Int).SetString(amount, 10) //nolint:gomnd
deposits = append(deposits, &deposit)
}
return deposits, nil
}
1 change: 1 addition & 0 deletions docker-compose.yml
@@ -476,6 +476,7 @@ services:
- ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONBRIDGEADDRESS=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E
- ZKEVM_BRIDGE_NETWORKCONFIG_L2POLYGONBRIDGEADDRESSES=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E,0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E
- ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED}
- ZKEVM_BRIDGE_CLAIMTXMANAGER_ARECLAIMSBETWEENL2SENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_ARECLAIMSBETWEENL2SENABLED}
volumes:
- ./test/test.keystore.claimtx:/pk/keystore.claimtxmanager
- ./config/config.local.toml:/app/config.toml