-
Notifications
You must be signed in to change notification settings - Fork 253
feat(benchmarking): adding gas burner test #3115
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
81dc810
cc56590
18fc15a
fccd9db
ae525ca
039eaf7
85c9d2d
fe8d166
99e3e42
e4e06c5
1c3b560
03b9239
fe3ca23
8752fee
06f532b
560974a
054f2c6
26bb117
b88cae3
272ab71
676e0d1
f4949a1
4c7b7e1
6f56c80
3eb733a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,121 @@ | ||||||
| //go:build evm | ||||||
|
|
||||||
| package benchmark | ||||||
|
|
||||||
| import ( | ||||||
| "context" | ||||||
| "fmt" | ||||||
| "time" | ||||||
|
|
||||||
| "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" | ||||||
| ) | ||||||
|
|
||||||
| // TestGasBurner measures gas throughput using a deterministic gasburner | ||||||
| // workload. The result is tracked via BENCH_JSON_OUTPUT as seconds_per_gigagas | ||||||
| // (lower is better) on the benchmark dashboard. | ||||||
| func (s *SpamoorSuite) TestGasBurner() { | ||||||
| const ( | ||||||
| numSpammers = 4 | ||||||
| countPerSpammer = 2500 | ||||||
| totalCount = numSpammers * countPerSpammer | ||||||
| warmupTxs = 50 | ||||||
| serviceName = "ev-node-gasburner" | ||||||
| waitTimeout = 5 * time.Minute | ||||||
| ) | ||||||
|
|
||||||
| t := s.T() | ||||||
| ctx := t.Context() | ||||||
| w := newResultWriter(t, "GasBurner") | ||||||
| defer w.flush() | ||||||
|
|
||||||
| e := s.setupEnv(config{ | ||||||
| serviceName: serviceName, | ||||||
| }) | ||||||
| api := e.spamoorAPI | ||||||
|
|
||||||
| s.Require().NoError(deleteAllSpammers(api), "failed to delete stale spammers") | ||||||
|
|
||||||
| gasburnerCfg := map[string]any{ | ||||||
| "gas_units_to_burn": 5_000_000, | ||||||
| "total_count": countPerSpammer, | ||||||
| "throughput": 25, | ||||||
| "max_pending": 5000, | ||||||
| "max_wallets": 500, | ||||||
| "rebroadcast": 0, | ||||||
| "base_fee": 20, | ||||||
| "tip_fee": 5, | ||||||
| "refill_amount": "5000000000000000000", | ||||||
| "refill_balance": "2000000000000000000", | ||||||
| "refill_interval": 300, | ||||||
| } | ||||||
|
|
||||||
| for i := range numSpammers { | ||||||
| name := fmt.Sprintf("bench-gasburner-%d", i) | ||||||
| id, err := api.CreateSpammer(name, spamoor.ScenarioGasBurnerTX, gasburnerCfg, true) | ||||||
| s.Require().NoError(err, "failed to create spammer %s", name) | ||||||
| t.Cleanup(func() { _ = api.DeleteSpammer(id) }) | ||||||
| } | ||||||
|
|
||||||
| // wait for wallet prep and contract deployment to finish before | ||||||
| // recording start block so warmup is excluded from the measurement. | ||||||
| pollSentTotal := func() (float64, error) { | ||||||
| metrics, mErr := api.GetMetrics() | ||||||
| if mErr != nil { | ||||||
| return 0, mErr | ||||||
| } | ||||||
| return sumCounter(metrics["spamoor_transactions_sent_total"]), nil | ||||||
| } | ||||||
| waitForMetricTarget(t, "spamoor_transactions_sent_total (warmup)", pollSentTotal, warmupTxs, waitTimeout) | ||||||
|
|
||||||
| startHeader, err := e.ethClient.HeaderByNumber(ctx, nil) | ||||||
| s.Require().NoError(err, "failed to get start block header") | ||||||
| startBlock := startHeader.Number.Uint64() | ||||||
| loadStart := time.Now() | ||||||
| t.Logf("start block: %d (after warmup)", startBlock) | ||||||
|
|
||||||
| // wait for all transactions to be sent | ||||||
| waitForMetricTarget(t, "spamoor_transactions_sent_total", pollSentTotal, float64(totalCount), waitTimeout) | ||||||
|
|
||||||
| // wait for pending txs to drain | ||||||
| drainCtx, drainCancel := context.WithTimeout(ctx, 30*time.Second) | ||||||
| defer drainCancel() | ||||||
| waitForDrain(drainCtx, t.Logf, e.ethClient, 10) | ||||||
| wallClock := time.Since(loadStart) | ||||||
|
|
||||||
| endHeader, err := e.ethClient.HeaderByNumber(ctx, nil) | ||||||
| s.Require().NoError(err, "failed to get end block header") | ||||||
| endBlock := endHeader.Number.Uint64() | ||||||
| t.Logf("end block: %d (range %d blocks)", endBlock, endBlock-startBlock) | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Fix inclusive block-range count in log message. Line 88 reports 🛠️ Proposed fix- t.Logf("end block: %d (range %d blocks)", endBlock, endBlock-startBlock)
+ t.Logf("end block: %d (range %d blocks)", endBlock, endBlock-startBlock+1)📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||
|
|
||||||
| // collect block-level gas/tx metrics | ||||||
| bm, err := collectBlockMetrics(ctx, e.ethClient, startBlock, endBlock) | ||||||
| s.Require().NoError(err, "failed to collect block metrics") | ||||||
|
|
||||||
| summary := bm.summarize() | ||||||
| s.Require().Greater(summary.SteadyState, time.Duration(0), "expected non-zero steady-state duration") | ||||||
| summary.log(t, startBlock, endBlock, bm.TotalBlockCount, bm.BlockCount, wallClock) | ||||||
|
|
||||||
| // derive seconds_per_gigagas from the summary's MGas/s | ||||||
| var secsPerGigagas float64 | ||||||
| if summary.AchievedMGas > 0 { | ||||||
| // MGas/s -> Ggas/s = MGas/s / 1000, then invert | ||||||
| secsPerGigagas = 1000.0 / summary.AchievedMGas | ||||||
| } | ||||||
| t.Logf("seconds_per_gigagas: %.4f", secsPerGigagas) | ||||||
|
|
||||||
| // collect and report traces | ||||||
| traces := s.collectTraces(e, serviceName) | ||||||
|
|
||||||
| if overhead, ok := evNodeOverhead(traces.evNode); ok { | ||||||
| t.Logf("ev-node overhead: %.1f%%", overhead) | ||||||
| w.addEntry(entry{Name: "GasBurner - ev-node overhead", Unit: "%", Value: overhead}) | ||||||
| } | ||||||
|
|
||||||
| w.addEntries(summary.entries("GasBurner")) | ||||||
| w.addSpans(traces.allSpans()) | ||||||
| w.addEntry(entry{ | ||||||
| Name: fmt.Sprintf("%s - seconds_per_gigagas", w.label), | ||||||
| Unit: "s/Ggas", | ||||||
| Value: secsPerGigagas, | ||||||
| }) | ||||||
| } | ||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Handle `waitForDrain` errors instead of dropping them.

Line 82 ignores the error returned by `waitForDrain`, so the test can proceed with a non-drained mempool and publish skewed throughput numbers.
✅ Proposed fix
As per coding guidelines, "Return errors early" and "Ensure tests are deterministic".
🤖 Prompt for AI Agents