diff --git a/routes/benchmarks.tsx b/routes/benchmarks.tsx
index 5643cbe506..6e4fc63941 100644
--- a/routes/benchmarks.tsx
+++ b/routes/benchmarks.tsx
@@ -120,13 +120,31 @@ export default function Benchmarks({ url, data }: PageProps) {
             ? benchData.normalizedMaxLatency
             : benchData.maxLatency}
           yLabel="milliseconds"
           yTickFormat={formatMsec}
         />
         <p>
           Max latency during the same test used above for requests/second.
           Smaller is better.
         </p>
+        <h3>
+          WebSocket Server Throughput
+        </h3>{" "}
+        <BenchmarkChart
+          columns={showNormalized
+            ? benchData.normalizedMsgPerSec
+            : benchData.msgPerSec}
+          yLabel="messages per second"
+          yTickFormat={formatReqSec}
+        />
+        <p>
+          Tests WebSocket server performance. 100 concurrent connections do as
+          many echo requests as possible. Bigger is better.
+        </p>
       </div>
     </div>
   );
 }
diff --git a/utils/benchmark_utils.ts b/utils/benchmark_utils.ts
index 84d6c5a7e0..ed51a52106 100644
--- a/utils/benchmark_utils.ts
+++ b/utils/benchmark_utils.ts
@@ -35,6 +35,7 @@ export interface BenchmarkRun {
   thread_count?: BenchmarkVariantsResultSet;
   throughput?: BenchmarkVariantsResultSet;
   lsp_exec_time?: BenchmarkVariantsResultSet;
+  ws_msg_per_sec?: BenchmarkVariantsResultSet;
 }

 export type BenchmarkName = Exclude<keyof BenchmarkRun, "created_at" | "sha1">;
@@ -328,6 +329,8 @@ export interface BenchmarkData {
   cargoDeps: Column[];
   sha1List: string[];
   lspExecTime: Column[];
+  msgPerSec: Column[];
+  normalizedMsgPerSec: Column[];
 }

 export function reshape(data: BenchmarkRun[]): BenchmarkData {
@@ -338,6 +341,7 @@ export function reshape(data: BenchmarkRun[]): BenchmarkData {
   const normalizedReqPerSec = createNormalizedColumns(data, "req_per_sec");
   const normalizedMaxLatency = createNormalizedColumns(data, "max_latency");
+  const normalizedMsgPerSec = createNormalizedColumns(data, "ws_msg_per_sec");

   return {
     execTime: createColumns(
@@ -353,6 +357,8 @@
     throughput: createColumns(data, "throughput"),
     reqPerSec: createColumns(data, "req_per_sec"),
     normalizedReqPerSec,
+    msgPerSec: createColumns(data, "ws_msg_per_sec"),
+    normalizedMsgPerSec,
     proxy: createColumns(data, "req_per_sec_proxy"),
     maxLatency: createColumns(data, "max_latency"),
     normalizedMaxLatency,
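
For context, the new chart tracks the `ws_msg_per_sec` series, which comes from the echo workload described in the chart copy. The real harness lives in Deno's benchmark suite, not in this diff; the sketch below is a minimal stand-in, and the port, payload, and measurement window are illustrative assumptions.

```ts
// Echo server: upgrade each request to a WebSocket and send every message back.
Deno.serve({ port: 8080 }, (req) => {
  const { socket, response } = Deno.upgradeWebSocket(req);
  socket.onmessage = (e) => socket.send(e.data);
  return response;
});

// Load generator: 100 concurrent connections, each echoing as fast as it can.
const CONNECTIONS = 100; // matches the "100 concurrent connections" in the copy
const DURATION_MS = 10_000; // illustrative measurement window
let messages = 0;

await Promise.all(
  Array.from({ length: CONNECTIONS }, () =>
    new Promise<void>((resolve) => {
      const ws = new WebSocket("ws://127.0.0.1:8080");
      const deadline = Date.now() + DURATION_MS;
      ws.onopen = () => ws.send("ping");
      ws.onmessage = () => {
        messages++;
        if (Date.now() < deadline) {
          ws.send("ping");
        } else {
          ws.close();
          resolve();
        }
      };
    })),
);

console.log("ws_msg_per_sec:", Math.round(messages / (DURATION_MS / 1000)));
```

Since the metric is a rate, the chart reuses `formatReqSec` for its tick labels rather than the latency formatter `formatMsec`.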
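
On the data side, `reshape()` turns the raw run history into chart columns, and the diff threads `ws_msg_per_sec` through it the same way as `req_per_sec`. Here is a hedged sketch of that flow, assuming `BenchmarkVariantsResultSet` maps variant names to numbers; the sample values, SHAs, and import path are invented for illustration.

```ts
import { type BenchmarkRun, reshape } from "./utils/benchmark_utils.ts";

// Invented sample history: one entry per benchmarked commit.
const runs: BenchmarkRun[] = [
  {
    created_at: "2023-01-01T00:00:00Z",
    sha1: "aaaaaaa",
    ws_msg_per_sec: { deno: 90_000 },
  },
  {
    created_at: "2023-01-02T00:00:00Z",
    sha1: "bbbbbbb",
    ws_msg_per_sec: { deno: 110_000 },
  },
];

const benchData = reshape(runs);
// msgPerSec drives the absolute chart; normalizedMsgPerSec the normalized view.
console.log(benchData.msgPerSec);
console.log(benchData.normalizedMsgPerSec);
```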