diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 6b4b95f8e0..44b360e76e 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -168,7 +168,7 @@ type BatchPosterConfig struct { L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` UseAccessLists bool `koanf:"use-access-lists" reload:"hot"` - GasEstimateBaseFeeMultipleBips arbmath.Bips `koanf:"gas-estimate-base-fee-multiple-bips"` + GasEstimateBaseFeeMultipleBips arbmath.UBips `koanf:"gas-estimate-base-fee-multiple-bips"` Dangerous BatchPosterDangerousConfig `koanf:"dangerous"` ReorgResistanceMargin time.Duration `koanf:"reorg-resistance-margin" reload:"hot"` CheckBatchCorrectness bool `koanf:"check-batch-correctness"` @@ -253,7 +253,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ L1BlockBoundBypass: time.Hour, UseAccessLists: true, RedisLock: redislock.DefaultCfg, - GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2, + GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2, ReorgResistanceMargin: 10 * time.Minute, CheckBatchCorrectness: true, } @@ -285,7 +285,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ L1BlockBound: "", L1BlockBoundBypass: time.Hour, UseAccessLists: true, - GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2, + GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2, CheckBatchCorrectness: true, } @@ -1035,7 +1035,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, if err != nil { return 0, err } - maxFeePerGas := arbmath.BigMulByBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips) + maxFeePerGas := arbmath.BigMulByUBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips) if useNormalEstimation { _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) if err != nil { @@ -1250,7 +1250,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) l1BoundMinTimestamp = arbmath.SaturatingUSub(latestHeader.Time, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds)) if config.L1BlockBoundBypass > 0 { + // #nosec G115 blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) + // #nosec G115 timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) l1BoundMinBlockNumberWithBypass = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelayBlocks)) l1BoundMinTimestampWithBypass = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds)) @@ -1316,7 +1318,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if hasL1Bound && config.ReorgResistanceMargin > 0 { firstMsgBlockNumber := firstMsg.Message.Header.BlockNumber firstMsgTimeStamp := firstMsg.Message.Header.Timestamp + // #nosec G115 batchNearL1BoundMinBlockNumber := firstMsgBlockNumber <= arbmath.SaturatingUAdd(l1BoundMinBlockNumber, uint64(config.ReorgResistanceMargin/ethPosBlockTime)) + // #nosec G115 batchNearL1BoundMinTimestamp := firstMsgTimeStamp <= arbmath.SaturatingUAdd(l1BoundMinTimestamp, uint64(config.ReorgResistanceMargin/time.Second)) if batchNearL1BoundMinTimestamp || batchNearL1BoundMinBlockNumber { log.Error( @@ -1361,6 +1365,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) batchPosterDAFailureCounter.Inc(1) return false, fmt.Errorf("%w: nonce changed 
from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } + // #nosec G115 sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain) if err != nil { batchPosterDAFailureCounter.Inc(1) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 6a483929b2..31b9a983db 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -638,11 +638,11 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if config.MaxFeeBidMultipleBips > 0 { // Limit the fee caps to be no greater than max(MaxFeeBidMultipleBips, minRbf) - maxNonBlobFee := arbmath.BigMulByBips(currentNonBlobFee, config.MaxFeeBidMultipleBips) + maxNonBlobFee := arbmath.BigMulByUBips(currentNonBlobFee, config.MaxFeeBidMultipleBips) if lastTx != nil { maxNonBlobFee = arbmath.BigMax(maxNonBlobFee, arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease)) } - maxBlobFee := arbmath.BigMulByBips(currentBlobFee, config.MaxFeeBidMultipleBips) + maxBlobFee := arbmath.BigMulByUBips(currentBlobFee, config.MaxFeeBidMultipleBips) if lastTx != nil && lastTx.BlobGasFeeCap() != nil { maxBlobFee = arbmath.BigMax(maxBlobFee, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) } @@ -1241,7 +1241,7 @@ type DataPosterConfig struct { MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"` - MaxFeeBidMultipleBips arbmath.Bips `koanf:"max-fee-bid-multiple-bips" reload:"hot"` + MaxFeeBidMultipleBips arbmath.UBips `koanf:"max-fee-bid-multiple-bips" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseDBStorage bool `koanf:"use-db-storage"` @@ -1345,7 +1345,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time MaxTipCapGwei: 1.2, MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x - MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + MaxFeeBidMultipleBips: arbmath.OneInUBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: true, @@ -1380,7 +1380,7 @@ var TestDataPosterConfig = DataPosterConfig{ MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, MaxBlobTxTipCapGwei: 1, - MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + MaxFeeBidMultipleBips: arbmath.OneInUBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: false, diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 7f2f61c07e..d2c49427be 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -204,7 +204,7 @@ func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, MaxBlobTxTipCapGwei: 10, - MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + MaxFeeBidMultipleBips: arbmath.OneInUBips * 10, AllocateMempoolBalance: true, UrgencyGwei: 2., @@ -335,7 +335,7 @@ func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) { MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, MaxBlobTxTipCapGwei: 10, - MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + MaxFeeBidMultipleBips: arbmath.OneInUBips * 10, 
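Context for the Bips → UBips migration running through the hunks above: fee multiples like `GasEstimateBaseFeeMultipleBips` and `MaxFeeBidMultipleBips` are inherently non-negative, so storing them as the signed `Bips` type forced a signed-to-unsigned cast (gosec G115) at every multiply. Below is a minimal, self-contained sketch of the unsigned helpers; the names mirror the `arbmath` additions further down in this diff, but the bodies here are reconstructed for illustration and may differ from the canonical upstream definitions.

```go
package main

import (
	"fmt"
	"math/big"
)

// UBips is an unsigned basis-points type, mirroring arbmath.UBips.
type UBips uint64

const OneInUBips UBips = 10000 // 100% expressed in basis points

// BigMulByUFrac multiplies a big.Int by numerator/denominator.
func BigMulByUFrac(value *big.Int, numerator, denominator uint64) *big.Int {
	result := new(big.Int).Set(value)
	result.Mul(result, new(big.Int).SetUint64(numerator))
	result.Div(result, new(big.Int).SetUint64(denominator))
	return result
}

// BigMulByUBips matches the helper added in util/arbmath/bips.go below:
// all-unsigned arithmetic, so there is no int64 round-trip for G115 to flag.
func BigMulByUBips(value *big.Int, bips UBips) *big.Int {
	return BigMulByUFrac(value, uint64(bips), uint64(OneInUBips))
}

func main() {
	baseFee := big.NewInt(1_000_000_000) // 1 gwei
	// Default GasEstimateBaseFeeMultipleBips is OneInUBips * 3 / 2 (150%).
	fmt.Println(BigMulByUBips(baseFee, OneInUBips*3/2)) // 1500000000
}
```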
AllocateMempoolBalance: true, UrgencyGwei: 2., diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 37ebfa5099..6a6cd3cfa4 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -95,11 +95,11 @@ func (s *Storage) PruneAll(ctx context.Context) error { if err != nil { return fmt.Errorf("pruning all keys: %w", err) } - until, err := strconv.Atoi(string(idx)) + until, err := strconv.ParseUint(string(idx), 10, 64) if err != nil { return fmt.Errorf("converting last item index bytes to integer: %w", err) } - return s.Prune(ctx, uint64(until+1)) + return s.Prune(ctx, until+1) } func (s *Storage) Prune(ctx context.Context, until uint64) error { diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index 8934d92b45..c6316caea7 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -72,24 +72,29 @@ func newRedisStorage(ctx context.Context, t *testing.T, encF storage.EncoderDeco func valueOf(t *testing.T, i int) *storage.QueuedTransaction { t.Helper() + // #nosec G115 meta, err := rlp.EncodeToBytes(storage.BatchPosterPosition{DelayedMessageCount: uint64(i)}) if err != nil { t.Fatalf("Encoding batch poster position, error: %v", err) } return &storage.QueuedTransaction{ FullTx: types.NewTransaction( + // #nosec G115 uint64(i), common.Address{}, big.NewInt(int64(i)), + // #nosec G115 uint64(i), big.NewInt(int64(i)), []byte{byte(i)}), Meta: meta, DeprecatedData: types.DynamicFeeTx{ - ChainID: big.NewInt(int64(i)), - Nonce: uint64(i), - GasTipCap: big.NewInt(int64(i)), - GasFeeCap: big.NewInt(int64(i)), + ChainID: big.NewInt(int64(i)), + // #nosec G115 + Nonce: uint64(i), + GasTipCap: big.NewInt(int64(i)), + GasFeeCap: big.NewInt(int64(i)), + // #nosec G115 Gas: uint64(i), Value: big.NewInt(int64(i)), Data: []byte{byte(i % 8)}, @@ -113,6 +118,7 @@ func values(t *testing.T, from, to int) []*storage.QueuedTransaction { func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage { t.Helper() for i := 0; i < 20; i++ { + // #nosec G115 if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil { t.Fatalf("Error putting a key/value: %v", err) } @@ -153,6 +159,7 @@ func TestPruneAll(t *testing.T) { s := newLevelDBStorage(t, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) ctx := context.Background() for i := 0; i < 20; i++ { + // #nosec G115 if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil { t.Fatalf("Error putting a key/value: %v", err) } @@ -236,6 +243,7 @@ func TestLast(t *testing.T) { ctx := context.Background() for i := 0; i < cnt; i++ { val := valueOf(t, i) + // #nosec G115 if err := s.Put(ctx, uint64(i), nil, val); err != nil { t.Fatalf("Error putting a key/value: %v", err) } @@ -255,6 +263,7 @@ func TestLast(t *testing.T) { for i := 0; i < cnt-1; i++ { prev := valueOf(t, i) newVal := valueOf(t, cnt+i) + // #nosec G115 if err := s.Put(ctx, uint64(i), prev, newVal); err != nil { t.Fatalf("Error putting a key/value: %v, prev: %v, new: %v", err, prev, newVal) } diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index 4f18531a76..b29a66dd05 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -121,6 +121,7 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock if currentNum < config.FinalizeDistance { return nil } + // #nosec G115 finalized = uint64(currentNum - config.FinalizeDistance) } @@ -189,6 +190,7 
@@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock return fmt.Errorf("inbox reader at delayed message %v db accumulator %v doesn't match delayed bridge accumulator %v at L1 block %v", pos-1, lastDelayedAcc, delayedBridgeAcc, finalized) } for i, msg := range messages { + // #nosec G115 err = d.exec.SequenceDelayedMessage(msg, startPos+uint64(i)) if err != nil { return err diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index fd050b5f67..c596cfa9b0 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -542,6 +542,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } else { from = arbmath.BigAddByUint(to, 1) } + // #nosec G115 haveMessages := uint64(len(delayedMessages) + len(sequencerBatches)) if haveMessages <= (config.TargetMessagesRead / 2) { blocksToFetch += (blocksToFetch + 4) / 5 diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index a5bab8342f..24a0564b97 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -279,6 +279,7 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde return err } config := s.config() + // #nosec G115 maxResequenceMsgCount := count + arbutil.MessageIndex(config.MaxReorgResequenceDepth) if config.MaxReorgResequenceDepth >= 0 && maxResequenceMsgCount < targetMsgCount { log.Error( @@ -388,6 +389,7 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde } for i := 0; i < len(messagesResults); i++ { + // #nosec G115 pos := count + arbutil.MessageIndex(i) err = s.storeResult(pos, *messagesResults[i], batch) if err != nil { @@ -680,7 +682,7 @@ func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, m if err != nil { return err } - if dups == len(messages) { + if dups == uint64(len(messages)) { return endBatch(batch) } // cant keep reorg lock when catching insertionMutex. 
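The `dups == uint64(len(messages))` change above, together with the `countDuplicateMessages` signature change in the next hunk, moves the duplicate count to `uint64` and widens `len()` at each comparison site. Widening a non-negative `int` to `uint64` is always lossless, whereas converting the `uint64` count back down to `int` is exactly the hazard G115 warns about. A toy stand-in (not the streamer's actual logic) showing the pattern:

```go
package main

import "fmt"

// countDuplicates is a toy stand-in for the streamer's duplicate check;
// it returns uint64 so callers never convert the count back down to int.
func countDuplicates(messages []string, known map[string]bool) uint64 {
	var dups uint64
	for _, m := range messages {
		if known[m] {
			dups++
		}
	}
	return dups
}

func main() {
	known := map[string]bool{"a": true, "b": true}
	messages := []string{"a", "b"}
	// uint64(len(...)) is a lossless widening: len is never negative.
	if countDuplicates(messages, known) == uint64(len(messages)) {
		fmt.Println("all messages already stored; end batch early")
	}
}
```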
@@ -715,10 +717,10 @@ func (s *TransactionStreamer) countDuplicateMessages( pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadataAndBlockHash, batch *ethdb.Batch, -) (int, bool, *arbostypes.MessageWithMetadata, error) { - curMsg := 0 +) (uint64, bool, *arbostypes.MessageWithMetadata, error) { + var curMsg uint64 for { - if len(messages) == curMsg { + if uint64(len(messages)) == curMsg { break } key := dbKey(messagePrefix, uint64(pos)) @@ -818,7 +820,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil broadcastStartPos := arbutil.MessageIndex(s.broadcasterQueuedMessagesPos.Load()) if messagesAreConfirmed { - var duplicates int + var duplicates uint64 var err error duplicates, confirmedReorg, oldMsg, err = s.countDuplicateMessages(messageStartPos, messages, &batch) if err != nil { @@ -857,7 +859,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil var feedReorg bool if !hasNewConfirmedMessages { - var duplicates int + var duplicates uint64 var err error duplicates, feedReorg, oldMsg, err = s.countDuplicateMessages(messageStartPos, messages, nil) if err != nil { @@ -889,6 +891,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Validate delayed message counts of remaining messages for i, msg := range messages { + // #nosec G115 msgPos := messageStartPos + arbutil.MessageIndex(i) diff := msg.MessageWithMeta.DelayedMessagesRead - lastDelayedRead if diff != 0 && diff != 1 { @@ -924,6 +927,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Check if new messages were added at the end of cache, if they were, then dont remove those particular messages if len(s.broadcasterQueuedMessages) > cacheClearLen { s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[cacheClearLen:] + // #nosec G115 s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos) + uint64(cacheClearLen)) } else { s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[:0] @@ -1044,6 +1048,7 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ batch = s.db.NewBatch() } for i, msg := range messages { + // #nosec G115 err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch) if err != nil { return err diff --git a/arbos/activate_test.go b/arbos/activate_test.go index 55440bb208..a89a38639a 100644 --- a/arbos/activate_test.go +++ b/arbos/activate_test.go @@ -20,6 +20,7 @@ func TestActivationDataFee(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) state, _ := arbosState.NewArbosMemoryBackedArbOSState() pricer := state.Programs().DataPricer() + // #nosec G115 time := uint64(time.Now().Unix()) assert := func(cond bool) { diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 1f09ff1440..156f36e7e7 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -79,6 +79,7 @@ func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error } ret := make([]common.Address, size) for i := range ret { + // #nosec G115 sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) ret[i], err = sba.Get() if err != nil { diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 566c71b689..6ae271060d 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -103,6 +103,7 @@ func (atab *AddressTable) Decompress(buf []byte) (common.Address, uint64, error) return common.Address{}, 0, err } if len(input) == 20 { + 
// #nosec G115 numBytesRead := uint64(rd.Size() - int64(rd.Len())) return common.BytesToAddress(input), numBytesRead, nil } else { diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go index b0fe1d0dac..5e605b8bd2 100644 --- a/arbos/arbosState/initialization_test.go +++ b/arbos/arbosState/initialization_test.go @@ -126,6 +126,7 @@ func checkAddressTable(arbState *ArbosState, addrTable []common.Address, t *test Fail(t) } for i, addr := range addrTable { + // #nosec G115 res, exists, err := atab.LookupIndex(uint64(i)) Require(t, err) if !exists { diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index 56fa579c15..427bdc3087 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -108,7 +108,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, if err != nil { return common.Hash{}, err } - for i := 0; addressReader.More(); i++ { + for i := uint64(0); addressReader.More(); i++ { addr, err := addressReader.GetNext() if err != nil { return common.Hash{}, err @@ -117,7 +117,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, if err != nil { return common.Hash{}, err } - if uint64(i) != slot { + if i != slot { return common.Hash{}, errors.New("address table slot mismatch") } } diff --git a/arbos/l1pricing_test.go b/arbos/l1pricing_test.go index 1cda4b3d82..6f9e3ecb35 100644 --- a/arbos/l1pricing_test.go +++ b/arbos/l1pricing_test.go @@ -279,7 +279,9 @@ func _testL1PriceEquilibration(t *testing.T, initialL1BasefeeEstimate *big.Int, evm.StateDB, evm, 3, + // #nosec G115 uint64(10*(i+1)), + // #nosec G115 uint64(10*(i+1)+5), bpAddr, arbmath.BigMulByUint(equilibriumL1BasefeeEstimate, unitsToAdd), diff --git a/arbos/merkleAccumulator/merkleAccumulator.go b/arbos/merkleAccumulator/merkleAccumulator.go index 2e060c5840..e62303e5fd 100644 --- a/arbos/merkleAccumulator/merkleAccumulator.go +++ b/arbos/merkleAccumulator/merkleAccumulator.go @@ -97,6 +97,7 @@ func (acc *MerkleAccumulator) GetPartials() ([]*common.Hash, error) { } partials := make([]*common.Hash, CalcNumPartials(size)) for i := range partials { + // #nosec G115 p, err := acc.getPartial(uint64(i)) if err != nil { return nil, err diff --git a/arbos/programs/data_pricer.go b/arbos/programs/data_pricer.go index ed7c98556d..d82aa81f04 100644 --- a/arbos/programs/data_pricer.go +++ b/arbos/programs/data_pricer.go @@ -83,8 +83,8 @@ func (p *DataPricer) UpdateModel(tempBytes uint32, time uint64) (*big.Int, error } exponent := arbmath.OneInBips * arbmath.Bips(demand) / arbmath.Bips(inertia) - multiplier := arbmath.ApproxExpBasisPoints(exponent, 12).Uint64() - costPerByte := arbmath.SaturatingUMul(uint64(minPrice), multiplier) / 10000 + multiplier := arbmath.ApproxExpBasisPoints(exponent, 12) + costPerByte := arbmath.UintSaturatingMulByBips(uint64(minPrice), multiplier) costInWei := arbmath.SaturatingUMul(costPerByte, uint64(tempBytes)) return arbmath.UintToBig(costInWei), nil } diff --git a/arbos/retryable_test.go b/arbos/retryable_test.go index ddb88348dd..2eccaea6c2 100644 --- a/arbos/retryable_test.go +++ b/arbos/retryable_test.go @@ -38,6 +38,7 @@ func TestRetryableLifecycle(t *testing.T) { retryableState := state.RetryableState() lifetime := uint64(retryables.RetryableLifetimeSeconds) + // #nosec G115 timestampAtCreation := uint64(rand.Int63n(1 << 16)) timeoutAtCreation := timestampAtCreation + lifetime currentTime := timeoutAtCreation @@ -57,6 +58,7 @@ func TestRetryableLifecycle(t 
*testing.T) { checkQueueSize := func(expected int, message string) { timeoutQueueSize, err := retryableState.TimeoutQueue.Size() Require(t, err) + // #nosec G115 if timeoutQueueSize != uint64(expected) { Fail(t, currentTime, message, timeoutQueueSize) } @@ -167,6 +169,7 @@ func TestRetryableCleanup(t *testing.T) { callvalue := big.NewInt(0) calldata := testhelpers.RandomizeSlice(make([]byte, rand.Intn(1<<12))) + // #nosec G115 timeout := uint64(rand.Int63n(1 << 16)) timestamp := 2 * timeout diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index e1cfe48bcf..5938244782 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -367,5 +367,7 @@ func RetryableEscrowAddress(ticketId common.Hash) common.Address { } func RetryableSubmissionFee(calldataLengthInBytes int, l1BaseFee *big.Int) *big.Int { - return arbmath.BigMulByUint(l1BaseFee, uint64(1400+6*calldataLengthInBytes)) + // This can't overflow because calldataLengthInBytes would need to be 3 exabytes + // #nosec G115 + return arbmath.BigMulByUint(l1BaseFee, 1400+6*uint64(calldataLengthInBytes)) } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 352726778d..bc16491af0 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -322,11 +322,11 @@ func (s *Storage) Burner() burn.Burner { } func (s *Storage) Keccak(data ...[]byte) ([]byte, error) { - byteCount := 0 + var byteCount uint64 for _, part := range data { - byteCount += len(part) + byteCount += uint64(len(part)) } - cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount)) + cost := 30 + 6*arbmath.WordsForBytes(byteCount) if err := s.burner.Burn(cost); err != nil { return nil, err } @@ -420,6 +420,7 @@ func (sbu *StorageBackedInt64) Get() (int64, error) { } func (sbu *StorageBackedInt64) Set(value int64) error { + // #nosec G115 return sbu.StorageSlot.Set(util.UintToHash(uint64(value))) // see implementation note above } @@ -456,7 +457,7 @@ func (sbu *StorageBackedUBips) Get() (arbmath.UBips, error) { } func (sbu *StorageBackedUBips) Set(bips arbmath.UBips) error { - return sbu.backing.Set(bips.Uint64()) + return sbu.backing.Set(uint64(bips)) } type StorageBackedUint16 struct { diff --git a/arbutil/block_message_relation.go b/arbutil/block_message_relation.go index e164cf2619..dcf4c86084 100644 --- a/arbutil/block_message_relation.go +++ b/arbutil/block_message_relation.go @@ -11,6 +11,7 @@ func BlockNumberToMessageCount(blockNumber uint64, genesisBlockNumber uint64) Me // Block number must correspond to a message count, meaning it may not be less than -1 func SignedBlockNumberToMessageCount(blockNumber int64, genesisBlockNumber uint64) MessageIndex { + // #nosec G115 return MessageIndex(uint64(blockNumber+1) - genesisBlockNumber) } diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index f7cc0d8c72..b43999a7db 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -113,6 +113,7 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block } // Divide work equally among available threads when BlocksPerThread is zero if c.BlocksPerThread == 0 { + // #nosec G115 work := (end - start) / uint64(c.Room) if work > 0 { blocksPerThread = work diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 44b48192ab..a499628cd5 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -105,6 +105,7 @@ func 
testReceiveMessages(t *testing.T, clientCompression bool, serverCompression go func() { for i := 0; i < messageCount; i++ { + // #nosec G115 Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() @@ -156,6 +157,7 @@ func TestInvalidSignature(t *testing.T) { go func() { for i := 0; i < messageCount; i++ { + // #nosec G115 Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 397698635a..4fe8657bfa 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -104,6 +104,7 @@ func (b *Broadcaster) BroadcastMessages( }() var feedMessages []*m.BroadcastFeedMessage for i, msg := range messagesWithBlockHash { + // #nosec G115 bfm, err := b.NewBroadcastFeedMessage(msg.MessageWithMeta, seq+arbutil.MessageIndex(i), msg.BlockHash) if err != nil { return err diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index aca9598754..1e26e6da5e 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -41,6 +41,7 @@ type BroadcastFeedMessage struct { } func (m *BroadcastFeedMessage) Size() uint64 { + // #nosec G115 return uint64(len(m.Signature) + len(m.Message.Message.L2msg) + 160) } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index ba60cbbd4d..f791d8cbc4 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -166,8 +166,10 @@ func startClientStore(args []string) error { if err != nil { return err } + // #nosec G115 cert, err = client.Store(ctx, message, uint64(time.Now().Add(config.DASRetentionPeriod).Unix())) } else if len(config.Message) > 0 { + // #nosec G115 cert, err = client.Store(ctx, []byte(config.Message), uint64(time.Now().Add(config.DASRetentionPeriod).Unix())) } else { return errors.New("--message or --random-message-size must be specified") @@ -363,6 +365,7 @@ func dumpKeyset(args []string) error { return err } + // #nosec G115 keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.Keyset.AssumedHonest)) if err != nil { return err diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a8463a7d21..fc59f2d231 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -876,6 +876,7 @@ func testUpdateTxIndex(chainDb ethdb.Database, chainConfig *params.ChainConfig, localWg.Add(1) go func() { batch := chainDb.NewBatch() + // #nosec G115 for blockNum := uint64(thread); blockNum <= lastBlock; blockNum += uint64(threads) { blockHash := rawdb.ReadCanonicalHash(chainDb, blockNum) block := rawdb.ReadBlock(chainDb, blockHash, blockNum) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 146a0049e7..e66d99b56e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -892,6 +892,7 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa l2ChainInfoIpfsDownloadPath := k.String("chain.info-ipfs-download-path") l2ChainInfoFiles := k.Strings("chain.info-files") l2ChainInfoJson := k.String("chain.info-json") + // #nosec G115 err = applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) if err != nil { return nil, nil, err @@ -1037,13 +1038,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c func initReorg(initConfig conf.InitConfig, chainConfig *params.ChainConfig, inboxTracker *arbnode.InboxTracker) error { var batchCount uint64 if 
initConfig.ReorgToBatch >= 0 { + // #nosec G115 batchCount = uint64(initConfig.ReorgToBatch) + 1 } else { var messageIndex arbutil.MessageIndex if initConfig.ReorgToMessageBatch >= 0 { + // #nosec G115 messageIndex = arbutil.MessageIndex(initConfig.ReorgToMessageBatch) } else if initConfig.ReorgToBlockBatch > 0 { genesis := chainConfig.ArbitrumChainParams.GenesisBlockNum + // #nosec G115 blockNum := uint64(initConfig.ReorgToBlockBatch) if blockNum < genesis { return fmt.Errorf("ReorgToBlockBatch %d before genesis %d", blockNum, genesis) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 096bb4b1ae..6fc7741478 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -212,6 +212,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node } if meta.ParentChainBlock <= l1BlockNum { signedBlockNum := arbutil.MessageCountToBlockNumber(meta.MessageCount, genesisNum) + // #nosec G115 blockNum := uint64(signedBlockNum) l2Hash := rawdb.ReadCanonicalHash(chainDb, blockNum) l2Header := rawdb.ReadHeader(chainDb, l2Hash, blockNum) diff --git a/das/aggregator.go b/das/aggregator.go index e8972447ad..e7460fa371 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -130,6 +130,7 @@ func NewAggregatorWithSeqInboxCaller( seqInboxCaller *bridgegen.SequencerInboxCaller, ) (*Aggregator, error) { + // #nosec G115 keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest)) if err != nil { return nil, err diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 635696bdab..7d48ed796d 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -58,6 +58,7 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu } func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + // #nosec G115 timestamp := uint64(time.Now().Unix()) nChunks := uint64(len(message)) / c.chunkSize lastChunkSize := uint64(len(message)) % c.chunkSize diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index d14766cc7e..bb1be0384e 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -153,7 +153,7 @@ type SendChunkResult struct { type batch struct { chunks [][]byte expectedChunks uint64 - seenChunks atomic.Int64 + seenChunks atomic.Uint64 expectedChunkSize, expectedSize uint64 timeout uint64 startTime time.Time @@ -248,7 +248,7 @@ func (b *batchBuilder) close(id uint64) ([]byte, uint64, time.Time, error) { return nil, 0, time.Time{}, fmt.Errorf("unknown batch(%d)", id) } - if batch.expectedChunks != uint64(batch.seenChunks.Load()) { + if batch.expectedChunks != batch.seenChunks.Load() { return nil, 0, time.Time{}, fmt.Errorf("incomplete batch(%d): got %d/%d chunks", id, batch.seenChunks.Load(), batch.expectedChunks) } diff --git a/das/das_test.go b/das/das_test.go index 179734c8b1..4971d454e5 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -55,6 +55,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Require(t, err, "no das") var daReader DataAvailabilityServiceReader = storageService + // #nosec G115 timeout := uint64(time.Now().Add(time.Hour * 24).Unix()) messageSaved := []byte("hello world") cert, err := daWriter.Store(firstCtx, messageSaved, timeout) @@ -146,6 +147,7 @@ func testDASMissingMessage(t *testing.T, storageType string) { var daReader DataAvailabilityServiceReader = storageService messageSaved := []byte("hello world") + // #nosec G115 timeout := 
uint64(time.Now().Add(time.Hour * 24).Unix()) cert, err := daWriter.Store(ctx, messageSaved, timeout) Require(t, err, "Error storing message") diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 1d9e5348d4..74bf12b927 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -267,6 +267,7 @@ func (dbs *DBStorageService) String() string { func (dbs *DBStorageService) HealthCheck(ctx context.Context) error { testData := []byte("Test-Data") + // #nosec G115 err := dbs.Put(ctx, testData, uint64(time.Now().Add(time.Minute).Unix())) if err != nil { return err diff --git a/das/factory.go b/das/factory.go index 5742a39479..7f696912b3 100644 --- a/das/factory.go +++ b/das/factory.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "math" "github.com/ethereum/go-ethereum/common" @@ -187,12 +186,7 @@ func CreateDAComponentsForDaserver( dasLifecycleManager.Register(restAgg) syncConf := &config.RestAggregator.SyncToStorage - var retentionPeriodSeconds uint64 - if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { - retentionPeriodSeconds = math.MaxUint64 - } else { - retentionPeriodSeconds = uint64(syncConf.RetentionPeriod.Seconds()) - } + retentionPeriodSeconds := uint64(syncConf.RetentionPeriod.Seconds()) if syncConf.Eager { if l1Reader == nil || seqInboxAddress == nil { diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index 49f961da60..0a451678d0 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -85,6 +85,7 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key common.Hash) } if dastree.ValidHash(key, data) { putErr := f.StorageService.Put( + // #nosec G115 ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds), ) if putErr != nil && !f.ignoreRetentionWriteErrors { diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index ce86786718..5e64c34b10 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -377,6 +377,7 @@ func migrate(fl *flatLayout, tl *trieLayout) error { return err } + // #nosec G115 expiryPath := tl.expiryPath(batch.key, uint64(batch.expiry.Unix())) if err = createHardLink(newPath, expiryPath); err != nil { return err diff --git a/das/local_file_storage_service_test.go b/das/local_file_storage_service_test.go index 01b999f356..8a36664670 100644 --- a/das/local_file_storage_service_test.go +++ b/das/local_file_storage_service_test.go @@ -78,6 +78,7 @@ func TestMigrationNoExpiry(t *testing.T) { Require(t, err) s.enableLegacyLayout = true + // #nosec G115 now := uint64(time.Now().Unix()) err = s.Put(ctx, []byte("a"), now+1) @@ -121,14 +122,19 @@ func TestMigrationExpiry(t *testing.T) { now := time.Now() // Use increments of expiry divisor in order to span multiple by-expiry-timestamp dirs + // #nosec G115 err = s.Put(ctx, []byte("a"), uint64(now.Add(-2*time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("b"), uint64(now.Add(-1*time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("c"), uint64(now.Add(time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("d"), uint64(now.Add(time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("e"), uint64(now.Add(2*time.Second*expiryDivisor).Unix())) Require(t, err) @@ -171,19 +177,26 @@ func TestExpiryDuplicates(t *testing.T) { now := time.Now() // Use increments of expiry divisor 
in order to span multiple by-expiry-timestamp dirs + // #nosec G115 err = s.Put(ctx, []byte("a"), uint64(now.Add(-2*time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("a"), uint64(now.Add(-1*time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("a"), uint64(now.Add(time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("d"), uint64(now.Add(time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("e"), uint64(now.Add(2*time.Second*expiryDivisor).Unix())) Require(t, err) + // #nosec G115 err = s.Put(ctx, []byte("f"), uint64(now.Add(3*time.Second*expiryDivisor).Unix())) Require(t, err) // Put the same entry and expiry again, should have no effect + // #nosec G115 err = s.Put(ctx, []byte("f"), uint64(now.Add(3*time.Second*expiryDivisor).Unix())) Require(t, err) diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 55f3ecd82c..77d3e8cd0f 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -16,6 +16,7 @@ import ( func TestRedisStorageService(t *testing.T) { ctx := context.Background() + // #nosec G115 timeout := uint64(time.Now().Add(time.Hour).Unix()) baseStorageService := NewMemoryBackedStorageService(ctx) server, err := miniredis.Run() diff --git a/das/redundant_storage_test.go b/das/redundant_storage_test.go index b56f62ee24..11d3b58264 100644 --- a/das/redundant_storage_test.go +++ b/das/redundant_storage_test.go @@ -17,6 +17,7 @@ const NumServices = 3 func TestRedundantStorageService(t *testing.T) { ctx := context.Background() + // #nosec G115 timeout := uint64(time.Now().Add(time.Hour).Unix()) services := []StorageService{} for i := 0; i < NumServices; i++ { diff --git a/das/restful_server_test.go b/das/restful_server_test.go index 1d3675749a..e6982f9db5 100644 --- a/das/restful_server_test.go +++ b/das/restful_server_test.go @@ -48,6 +48,7 @@ func TestRestfulClientServer(t *testing.T) { server, port, err := NewRestfulDasServerOnRandomPort(LocalServerAddressForTest, storage) Require(t, err) + // #nosec G115 err = storage.Put(ctx, data, uint64(time.Now().Add(time.Hour).Unix())) Require(t, err) diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 24a470be5b..1b3e2b8f44 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -119,7 +119,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([] return nil, err } - d, err := NewServiceDetails(service, *pubKey, 1< uint64(countsNum) && sectionNum > 1 { break } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 457dae0910..7a78cee309 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -671,7 +671,7 @@ func l2MessageBatchDataFromTxes(txes types.Transactions) ([]byte, error) { if err != nil { return nil, err } - binary.BigEndian.PutUint64(sizeBuf, uint64(len(txBytes)+1)) + binary.BigEndian.PutUint64(sizeBuf, uint64(len(txBytes))+1) l2Message = append(l2Message, sizeBuf...) l2Message = append(l2Message, arbos.L2MessageKind_SignedTx) l2Message = append(l2Message, txBytes...) 
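The `common_test.go` hunk just above is subtler than the annotation-only changes: it moves the `+ 1` outside the conversion, so the addition is performed in `uint64` after widening rather than in `int` before it. The difference only matters at the extreme (`len` near the platform's `MaxInt`), but it is the order of operations G115 pushes toward: widen first, then do arithmetic. A small sketch (the function name here is made up for illustration):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader is a hypothetical helper showing the fixed pattern:
// widen len() to uint64 first, then add, so the sum is computed in
// uint64 and cannot overflow the platform int.
func frameHeader(txBytes []byte) []byte {
	sizeBuf := make([]byte, 8)
	// After the fix: uint64(len(txBytes)) + 1, not uint64(len(txBytes) + 1).
	binary.BigEndian.PutUint64(sizeBuf, uint64(len(txBytes))+1)
	return sizeBuf
}

func main() {
	fmt.Println(frameHeader([]byte{0xde, 0xad})) // [0 0 0 0 0 0 0 3]
}
```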
diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index 284c709fad..6285702342 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -214,7 +214,7 @@ func TestComponentEstimate(t *testing.T) { userBalance := big.NewInt(1e16) maxPriorityFeePerGas := big.NewInt(0) - maxFeePerGas := arbmath.BigMulByUfrac(l2BaseFee, 3, 2) + maxFeePerGas := arbmath.BigMulByUFrac(l2BaseFee, 3, 2) builder.L2Info.GenerateAccount("User") builder.L2.TransferBalance(t, "Owner", "User", userBalance, builder.L2Info) diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 17bfb18892..927dc1b630 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -163,6 +163,7 @@ func TestGetL1Confirmations(t *testing.T) { numTransactions := 200 + // #nosec G115 if l1Confs >= uint64(numTransactions) { t.Fatalf("L1Confirmations for latest block %v is already %v (over %v)", genesisBlock.Number(), l1Confs, numTransactions) } @@ -175,6 +176,7 @@ func TestGetL1Confirmations(t *testing.T) { Require(t, err) // Allow a gap of 10 for asynchronicity, just in case + // #nosec G115 if l1Confs+10 < uint64(numTransactions) { t.Fatalf("L1Confirmations for latest block %v is only %v (did not hit expected %v)", genesisBlock.Number(), l1Confs, numTransactions) } diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index c68df6ea22..25c52396f9 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -175,6 +175,7 @@ func TestOutboxProofs(t *testing.T) { sibling := place ^ which position := merkletree.LevelAndLeaf{ + // #nosec G115 Level: uint64(level), Leaf: sibling, } @@ -201,6 +202,7 @@ func TestOutboxProofs(t *testing.T) { leaf := total - 1 // preceding it. 
We subtract 1 since we count from 0 partial := merkletree.LevelAndLeaf{ + // #nosec G115 Level: uint64(level), Leaf: leaf, } @@ -289,6 +291,7 @@ func TestOutboxProofs(t *testing.T) { step.Leaf += 1 << step.Level // we start on the min partial's zero-hash sibling known[step] = zero + // #nosec G115 for step.Level < uint64(treeLevels) { curr, ok := known[step] diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 7b09552e28..4833d35536 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -362,12 +362,14 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Require(t, err) l2info.GenerateAccount("User2") + // #nosec G115 for i := genesis; i < uint64(txCount)+genesis; i++ { tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) err := client.SendTransaction(ctx, tx) Require(t, err) receipt, err := EnsureTxSucceeded(ctx, client, tx) Require(t, err) + // #nosec G115 if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want { Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) } @@ -378,6 +380,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Fatal(t, "missing current block") } lastBlock := currentHeader.Number.Uint64() + // #nosec G115 if want := genesis + uint64(txCount); lastBlock < want { Fatal(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) } @@ -391,6 +394,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig bc = builder.L2.ExecNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks + // #nosec G115 for i := genesis; i <= genesis+uint64(txCount); i++ { block := bc.GetBlockByNumber(i) if block == nil { @@ -423,6 +427,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } } } + // #nosec G115 for i := genesis + 1; i <= genesis+uint64(txCount); i += i % 10 { _, err = client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i)) if err != nil { diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 5e70fdf098..21f0755225 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -164,12 +164,12 @@ func compareAllMsgResultsFromConsensusAndExecution( } var lastResult *execution.MessageResult - for msgCount := 1; arbutil.MessageIndex(msgCount) <= consensusMsgCount; msgCount++ { + for msgCount := arbutil.MessageIndex(1); msgCount <= consensusMsgCount; msgCount++ { pos := msgCount - 1 resultExec, err := testClient.ExecNode.ResultAtPos(arbutil.MessageIndex(pos)) Require(t, err) - resultConsensus, err := testClient.ConsensusNode.TxStreamer.ResultAtCount(arbutil.MessageIndex(msgCount)) + resultConsensus, err := testClient.ConsensusNode.TxStreamer.ResultAtCount(msgCount) Require(t, err) if !reflect.DeepEqual(resultExec, resultConsensus) { diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index 6babe5833f..a9f66b0e2f 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -265,6 +265,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { for j := 0; j < numMessages; j++ { sourceNum := rand.Int() % len(state.accounts) source := state.accounts[sourceNum] + // #nosec G115 amount := new(big.Int).SetUint64(uint64(rand.Int()) % state.balances[source].Uint64()) reserveAmount := 
new(big.Int).SetUint64(l2pricing.InitialBaseFeeWei * 100000000) if state.balances[source].Cmp(new(big.Int).Add(amount, reserveAmount)) < 0 { @@ -314,6 +315,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { for j := 0; ; j++ { haveNonce, err := builder.L1.Client.PendingNonceAt(ctx, seqOpts.From) Require(t, err) + // #nosec G115 if haveNonce == uint64(seqNonce) { break } @@ -380,6 +382,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { t.Errorf("Transaction: %v was not refunded, balance diff: %v, cost: %v", tx.Hash(), diff, txCost) } + // #nosec G115 state.l2BlockNumber += uint64(numMessages) state.l1BlockNumber = txRes.BlockNumber.Uint64() blockStates = append(blockStates, state) diff --git a/system_tests/stylus_trace_test.go b/system_tests/stylus_trace_test.go index cb303874d6..5c4463d9f7 100644 --- a/system_tests/stylus_trace_test.go +++ b/system_tests/stylus_trace_test.go @@ -76,6 +76,7 @@ func sendAndTraceTransaction( } func intToBytes(v int) []byte { + // #nosec G115 return binary.BigEndian.AppendUint64(nil, uint64(v)) } diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go index 646dad3a92..39b014f3ac 100644 --- a/util/arbmath/bips.go +++ b/util/arbmath/bips.go @@ -27,24 +27,35 @@ func BigMulByBips(value *big.Int, bips Bips) *big.Int { return BigMulByFrac(value, int64(bips), int64(OneInBips)) } +func BigMulByUBips(value *big.Int, bips UBips) *big.Int { + return BigMulByUFrac(value, uint64(bips), uint64(OneInUBips)) +} + func IntMulByBips(value int64, bips Bips) int64 { return value * int64(bips) / int64(OneInBips) } +// UintMulByBips multiplies a uint value by a bips value +// bips must be positive and not cause an overflow func UintMulByBips(value uint64, bips Bips) uint64 { + // #nosec G115 return value * uint64(bips) / uint64(OneInBips) } -func SaturatingCastToBips(value uint64) Bips { - return Bips(SaturatingCast[int64](value)) -} - -func (bips UBips) Uint64() uint64 { - return uint64(bips) +// UintSaturatingMulByBips multiplies a uint value by a bips value, +// saturating at the maximum bips value (not the maximum uint64 result), +// then rounding down and returning a uint64. 
+// Returns 0 if bips is less than or equal to zero +func UintSaturatingMulByBips(value uint64, bips Bips) uint64 { + if bips <= 0 { + return 0 + } + // #nosec G115 + return SaturatingUMul(value, uint64(bips)) / uint64(OneInBips) } -func (bips Bips) Uint64() uint64 { - return uint64(bips) +func SaturatingCastToBips(value uint64) Bips { + return Bips(SaturatingCast[int64](value)) } // BigDivToBips returns dividend/divisor as bips, saturating if out of bounds diff --git a/util/arbmath/math.go b/util/arbmath/math.go index e5bed67f6d..07a9941b65 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -29,6 +29,7 @@ func NextOrCurrentPowerOf2(value uint64) uint64 { // Log2ceil the log2 of the int, rounded up func Log2ceil(value uint64) uint64 { + // #nosec G115 return uint64(64 - bits.LeadingZeros64(value)) } @@ -228,8 +229,8 @@ func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int { return value } -// BigMulByUfrac multiply a huge by a rational whose components are non-negative -func BigMulByUfrac(value *big.Int, numerator, denominator uint64) *big.Int { +// BigMulByUFrac multiply a huge by a rational whose components are non-negative +func BigMulByUFrac(value *big.Int, numerator, denominator uint64) *big.Int { value = new(big.Int).Set(value) value.Mul(value, new(big.Int).SetUint64(numerator)) value.Div(value, new(big.Int).SetUint64(denominator)) @@ -407,6 +408,8 @@ func ApproxExpBasisPoints(value Bips, accuracy uint64) Bips { if negative { input = -value } + // This cast is safe because input is always positive + // #nosec G115 x := uint64(input) bips := uint64(OneInBips) diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go index 528666dc19..3660f3657e 100644 --- a/util/arbmath/math_test.go +++ b/util/arbmath/math_test.go @@ -44,6 +44,7 @@ func TestMath(t *testing.T) { // try the first million sqrts for i := 0; i < 1000000; i++ { + // #nosec G115 input := uint64(i) approx := ApproxSquareRoot(input) correct := math.Sqrt(float64(input)) diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index 2b47a940c3..160323cf60 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -191,6 +191,7 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil || len(rawData) == 0 { // blobs are pruned after 4096 epochs (1 epoch = 32 slots), we determine if the requested slot were to be pruned by a non-archive endpoint + // #nosec G115 roughAgeOfSlot := uint64(time.Now().Unix()) - (b.genesisTime + slot*b.secondsPerSlot) if roughAgeOfSlot > b.secondsPerSlot*32*4096 { return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching older blobs in slot: %d, an archive endpoint is required, please refer to https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers, err: %w", slot, err) diff --git a/util/merkletree/merkleEventProof_test.go b/util/merkletree/merkleEventProof_test.go index b64cc88c2a..6af8479190 100644 --- a/util/merkletree/merkleEventProof_test.go +++ b/util/merkletree/merkleEventProof_test.go @@ -22,6 +22,7 @@ func initializedMerkleAccumulatorForTesting() *merkleAccumulator.MerkleAccumulat func TestProofForNext(t *testing.T) { leaves := make([]common.Hash, 13) for i := range leaves { + // #nosec G115 leaves[i] = pseudorandomForTesting(uint64(i)) } diff --git a/validator/server_arb/machine_cache.go 
b/validator/server_arb/machine_cache.go index 55ef61cf11..35f3406236 100644 --- a/validator/server_arb/machine_cache.go +++ b/validator/server_arb/machine_cache.go @@ -31,7 +31,7 @@ type MachineCache struct { } type MachineCacheConfig struct { - CachedChallengeMachines int `koanf:"cached-challenge-machines"` + CachedChallengeMachines uint64 `koanf:"cached-challenge-machines"` InitialSteps uint64 `koanf:"initial-steps"` } @@ -42,7 +42,7 @@ var DefaultMachineCacheConfig = MachineCacheConfig{ func MachineCacheConfigConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".initial-steps", DefaultMachineCacheConfig.InitialSteps, "initial steps between machines") - f.Int(prefix+".cached-challenge-machines", DefaultMachineCacheConfig.CachedChallengeMachines, "how many machines to store in cache while working on a challenge (should be even)") + f.Uint64(prefix+".cached-challenge-machines", DefaultMachineCacheConfig.CachedChallengeMachines, "how many machines to store in cache while working on a challenge (should be even)") } // `initialMachine` won't be mutated by this function. @@ -140,7 +140,7 @@ func (c *MachineCache) unlockBuild(err error) { } func (c *MachineCache) setRangeLocked(ctx context.Context, start uint64, end uint64) error { - newInterval := (end - start) / uint64(c.config.CachedChallengeMachines) + newInterval := (end - start) / c.config.CachedChallengeMachines if newInterval == 0 { newInterval = 2 } @@ -150,7 +150,7 @@ func (c *MachineCache) setRangeLocked(ctx context.Context, start uint64, end uin if end >= c.finalMachineStep { end = c.finalMachineStep - newInterval/2 } - newInterval = (end - start) / uint64(c.config.CachedChallengeMachines) + newInterval = (end - start) / c.config.CachedChallengeMachines if newInterval == 0 { newInterval = 1 } @@ -212,7 +212,7 @@ func (c *MachineCache) populateCache(ctx context.Context) error { if nextMachine.GetStepCount()+c.machineStepInterval >= c.finalMachineStep { break } - if len(c.machines) >= c.config.CachedChallengeMachines { + if uint64(len(c.machines)) >= c.config.CachedChallengeMachines { break } nextMachine = nextMachine.CloneMachineInterface() @@ -236,6 +236,7 @@ func (c *MachineCache) getClosestMachine(stepCount uint64) (int, MachineInterfac } stepsFromStart := stepCount - c.firstMachineStep var index int + // #nosec G115 if c.machineStepInterval == 0 || stepsFromStart > c.machineStepInterval*uint64(len(c.machines)-1) { index = len(c.machines) - 1 } else { diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go index e7753748ab..06c451bda1 100644 --- a/validator/server_jit/jit_machine.go +++ b/validator/server_jit/jit_machine.go @@ -306,6 +306,7 @@ func (machine *JitMachine) prove( if err != nil { return state, fmt.Errorf("failed to read memory usage from Jit machine: %w", err) } + // #nosec G115 if memoryUsed > uint64(machine.wasmMemoryUsageLimit) { log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed) } diff --git a/wavmio/stub.go b/wavmio/stub.go index 1395fb4235..0c82506ff3 100644 --- a/wavmio/stub.go +++ b/wavmio/stub.go @@ -66,6 +66,7 @@ func parsePreimageBytes(path string) { if err != nil { panic(err) } + // #nosec G115 if uint64(read) != fieldSize { panic("missing bytes reading data") } @@ -77,18 +78,18 @@ func parsePreimageBytes(path string) { func StubInit() { preimages = make(map[common.Hash][]byte) var delayedMsgPath arrayFlags - seqMsgPosFlag := flag.Int("inbox-position", 0, "position for 
sequencer inbox message") - posWithinMsgFlag := flag.Int("position-within-message", 0, "position inside sequencer inbox message") - delayedPositionFlag := flag.Int("delayed-inbox-position", 0, "position for first delayed inbox message") + seqMsgPosFlag := flag.Uint64("inbox-position", 0, "position for sequencer inbox message") + posWithinMsgFlag := flag.Uint64("position-within-message", 0, "position inside sequencer inbox message") + delayedPositionFlag := flag.Uint64("delayed-inbox-position", 0, "position for first delayed inbox message") lastBlockFlag := flag.String("last-block-hash", "0000000000000000000000000000000000000000000000000000000000000000", "lastBlockHash") flag.Var(&delayedMsgPath, "delayed-inbox", "delayed inbox messages (multiple values)") inboxPath := flag.String("inbox", "", "file to load sequencer message") preimagesPath := flag.String("preimages", "", "file to load preimages from") flag.Parse() - seqMsgPos = uint64(*seqMsgPosFlag) - posWithinMsg = uint64(*posWithinMsgFlag) - delayedMsgFirstPos = uint64(*delayedPositionFlag) + seqMsgPos = *seqMsgPosFlag + posWithinMsg = *posWithinMsgFlag + delayedMsgFirstPos = *delayedPositionFlag lastBlockHash = common.HexToHash(*lastBlockFlag) for _, path := range delayedMsgPath { msg, err := os.ReadFile(path)