Aggregation Performance

Analysis of post-quantum signature aggregation and verification in Lean Consensus clients.

This notebook examines:

  • Signature aggregation time
  • Total signatures aggregated
  • Aggregation verification time
  • Aggregation and verification throughput (per second and per slot)
  • Performance comparison across clients
Show code
import json
from pathlib import Path

import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Set default renderer for static HTML output
import plotly.io as pio
pio.renderers.default = "notebook"
Show code
# Resolve devnet_id
DATA_DIR = Path("../data")

if devnet_id is None:
    # Fall back to the most recent devnet listed in the manifest.
    devnets_path = DATA_DIR / "devnets.json"
    if not devnets_path.exists():
        raise ValueError("No devnets.json found. Run 'just detect-devnets' first.")
    with open(devnets_path) as f:
        devnets = json.load(f).get("devnets", [])
    if not devnets:
        # Previously this fell through with devnet_id = None, making
        # `DATA_DIR / devnet_id` below fail with an opaque TypeError.
        raise ValueError("devnets.json contains no devnets. Run 'just detect-devnets' first.")
    devnet_id = devnets[-1]["id"]  # Latest (assumes manifest is ordered oldest -> newest)
    print(f"Using latest devnet: {devnet_id}")

DEVNET_DIR = DATA_DIR / devnet_id
print(f"Loading data from: {DEVNET_DIR}")
Loading data from: ../data/pqdevnet-20260326T0346Z
Show code
# Load devnet metadata and locate the entry for the selected devnet.
with open(DATA_DIR / "devnets.json") as f:
    devnets_data = json.load(f)

devnet_info = next((d for d in devnets_data["devnets"] if d["id"] == devnet_id), None)
if devnet_info is None:
    # Fail fast: later cells index into devnet_info (e.g. devnet_info["clients"]),
    # which would otherwise raise a confusing TypeError on None.
    raise ValueError(f"Devnet {devnet_id!r} not found in devnets.json")

print(f"Devnet: {devnet_info['id']}")
print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
print(f"Time: {devnet_info['start_time']} to {devnet_info['end_time']}")
print(f"Slots: {devnet_info['start_slot']} → {devnet_info['end_slot']}")
print(f"Clients: {', '.join(devnet_info['clients'])}")
Devnet: pqdevnet-20260326T0346Z
Duration: 16.3 hours
Time: 2026-03-26T03:46:30+00:00 to 2026-03-26T20:06:29+00:00
Slots: 0 β†’ 33374
Clients: ethlambda_0, ethlambda_1, ethlambda_2, ethlambda_3, ethlambda_4, gean_0, grandine_0, lantern_0, nlean_0, qlean_0, ream_0, zeam_0

Load DataΒΆ

Show code
# Load PQ signature timing data (per-client quantile time series).
timing_path = DEVNET_DIR / "pq_signature_timing.parquet"
timing_df = pd.read_parquet(timing_path)

print(f"Loaded {len(timing_df)} timing records")
print(f"Metrics: {timing_df['metric'].unique().tolist()}")
print(f"Clients: {timing_df['client'].unique().tolist()}")
Loaded 16512 timing records
Metrics: ['signing', 'verification', 'agg_verification']
Clients: ['ethlambda_0', 'ethlambda_1', 'ethlambda_2', 'ethlambda_3', 'ethlambda_4', 'gean_0', 'grandine_0', 'lantern_0', 'nlean_0', 'qlean_0', 'ream_0', 'zeam_0']
Show code
# Load PQ signature counts
counts_path = DEVNET_DIR / "pq_signature_metrics.parquet"
counts_df = pd.read_parquet(counts_path)
print(f"Loaded {len(counts_df)} count records")
print(f"Metrics: {counts_df['metric'].unique().tolist()}")

# Unified client list from devnet metadata (includes all containers via cAdvisor)
all_clients = sorted(devnet_info["clients"])
# Subplot grid: up to two columns, rows via ceiling division.
n_cols = min(len(all_clients), 2)
n_rows = (len(all_clients) + n_cols - 1) // n_cols
print(f"\nAll clients ({len(all_clients)}): {all_clients}")
Loaded 37354 count records
Metrics: ['lean_pq_sig_aggregated_signatures_valid_total', 'lean_pq_sig_aggregated_signatures_invalid_total', 'lean_pq_sig_aggregated_signatures_total', 'lean_pq_sig_attestations_in_aggregated_signatures_total']

All clients (12): ['ethlambda_0', 'ethlambda_1', 'ethlambda_2', 'ethlambda_3', 'ethlambda_4', 'gean_0', 'grandine_0', 'lantern_0', 'nlean_0', 'qlean_0', 'ream_0', 'zeam_0']

Total Signatures AggregatedΒΆ

Cumulative number of individual signatures included in aggregated signature proofs over time.

Show code
# Cumulative attestation signatures folded into aggregation proofs, per client.
att_in_agg_df = counts_df[counts_df["metric"] == "lean_pq_sig_attestations_in_aggregated_signatures_total"]

if att_in_agg_df.empty:
    print("No signatures in aggregated signature proof available")
else:
    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    for idx, client in enumerate(all_clients):
        r, c = divmod(idx, n_cols)
        r, c = r + 1, c + 1
        series = att_in_agg_df[att_in_agg_df["client"] == client].sort_values("timestamp")
        if not series.empty and series["value"].max() > 0:
            trace = go.Scatter(
                x=series["timestamp"], y=series["value"],
                showlegend=False,
                line=dict(color="#636EFA"),
            )
            fig.add_trace(trace, row=r, col=c)
            fig.update_yaxes(title_text="count", row=r, col=c)
        else:
            # Empty placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=r, col=c,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            axis_num = (r - 1) * n_cols + c
            suffix = "" if axis_num == 1 else str(axis_num)
            fig.add_annotation(
                text="No data available",
                xref=f"x{suffix} domain", yref=f"y{suffix} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )

    fig.update_layout(
        title="Total Signatures Aggregated by Client",
        height=270 * n_rows,
    )
    fig.show()

Signature Aggregation TimeΒΆ

Time to build an aggregated signature proof from individual post-quantum signatures.

Show code
# Filter to aggregate building metric
# NOTE(review): this run's timing data only reports metrics
# ['signing', 'verification', 'agg_verification'] (see the load cell output),
# so "agg_building" matches nothing and the no-data branch runs.
# Confirm the metric name against what the clients actually emit.
agg_build_df = timing_df[timing_df["metric"] == "agg_building"].copy()

if agg_build_df.empty:
    print("No signature aggregation timing data available")
else:
    # Raw values are assumed to be seconds — converted to ms for plotting.
    agg_build_df["value_ms"] = agg_build_df["value"] * 1000

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    # One color per quantile (p50/p95/p99); unexpected quantiles fall back to purple.
    colors = {"0.5": "#636EFA", "0.95": "#EF553B", "0.99": "#00CC96"}
    legend_added = set()  # show each quantile's legend entry only once across subplots

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        cdf = agg_build_df[agg_build_df["client"] == client]
        if not cdf.empty:
            for q in sorted(cdf["quantile"].unique()):
                qdf = cdf[cdf["quantile"] == q].sort_values("timestamp")
                q_str = str(q)
                label = f"p{int(q * 100)}"
                show_legend = q_str not in legend_added
                legend_added.add(q_str)
                fig.add_trace(
                    go.Scatter(
                        x=qdf["timestamp"], y=qdf["value_ms"],
                        name=label, legendgroup=q_str,
                        showlegend=show_legend,
                        line=dict(color=colors.get(q_str, "#AB63FA")),
                    ),
                    row=row, col=col,
                )
        else:
            # Placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
        fig.update_yaxes(title_text="ms", row=row, col=col)

    fig.update_layout(
        title="Signature Aggregation Time by Client",
        height=270 * n_rows,
    )
    fig.show()
No signature aggregation timing data available
Show code
# Per-client/per-quantile summary of aggregation time (only when data exists).
if not agg_build_df.empty:
    stats = (
        agg_build_df
        .groupby(["client", "quantile"])["value_ms"]
        .agg(["mean", "min", "max"])
        .round(3)
    )
    stats.columns = ["Mean (ms)", "Min (ms)", "Max (ms)"]
    display(stats)

Aggregation Verification TimeΒΆ

Time to verify an aggregated post-quantum signature proof.

Show code
# Filter to aggregate verification metric
agg_ver_df = timing_df[timing_df["metric"] == "agg_verification"].copy()

if agg_ver_df.empty:
    print("No aggregated signature verification timing data available")
else:
    # Raw values are assumed to be seconds — converted to ms for plotting.
    agg_ver_df["value_ms"] = agg_ver_df["value"] * 1000

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    # One color per quantile (p50/p95/p99); unexpected quantiles fall back to purple.
    colors = {"0.5": "#636EFA", "0.95": "#EF553B", "0.99": "#00CC96"}
    legend_added = set()  # show each quantile's legend entry only once across subplots

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        cdf = agg_ver_df[agg_ver_df["client"] == client]
        if not cdf.empty:
            for q in sorted(cdf["quantile"].unique()):
                qdf = cdf[cdf["quantile"] == q].sort_values("timestamp")
                q_str = str(q)
                label = f"p{int(q * 100)}"
                show_legend = q_str not in legend_added
                legend_added.add(q_str)
                fig.add_trace(
                    go.Scatter(
                        x=qdf["timestamp"], y=qdf["value_ms"],
                        name=label, legendgroup=q_str,
                        showlegend=show_legend,
                        line=dict(color=colors.get(q_str, "#AB63FA")),
                    ),
                    row=row, col=col,
                )
        else:
            # Placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
        fig.update_yaxes(title_text="ms", row=row, col=col)

    fig.update_layout(
        title="Aggregation Verification Time by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-client/per-quantile summary of verification time (only when data exists).
if not agg_ver_df.empty:
    table = agg_ver_df.groupby(["client", "quantile"])["value_ms"].agg(["mean", "min", "max"])
    table = table.round(3)
    table.columns = ["Mean (ms)", "Min (ms)", "Max (ms)"]
    display(table)
Mean (ms) Min (ms) Max (ms)
client quantile
ethlambda_0 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
ethlambda_1 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
ethlambda_2 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
ethlambda_3 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
ethlambda_4 0.50 50.002 50.000 50.197
0.95 95.003 95.000 95.374
0.99 99.003 99.000 99.390
gean_0 0.50 137.985 99.627 163.971
0.95 238.875 234.944 249.274
0.99 260.459 246.989 2280.000
grandine_0 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
lantern_0 0.50 63.342 50.000 2625.000
0.95 114.521 95.000 3862.500
0.99 119.070 99.000 3972.500
nlean_0 0.50 NaN NaN NaN
0.95 NaN NaN NaN
0.99 NaN NaN NaN
qlean_0 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
ream_0 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000
zeam_0 0.50 50.000 50.000 50.000
0.95 95.000 95.000 95.000
0.99 99.000 99.000 99.000

Signatures Aggregated per SlotΒΆ

Rate of signature aggregations per slot (4-second slot time). Useful for researchers and implementers with a focus on signature aggregation in conjunction with consensus.

Show code
SLOT_TIME = 4  # seconds per slot

agg_total_df = counts_df[counts_df["metric"] == "lean_pq_sig_aggregated_signatures_total"]

if agg_total_df.empty:
    print("No signature aggregation count data available")
else:
    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        cdf = agg_total_df[agg_total_df["client"] == client].sort_values("timestamp")
        if not cdf.empty and cdf["value"].max() > 0:
            # Convert cumulative counter to a per-slot rate:
            # diff(value) / diff(seconds) * SLOT_TIME.
            dt = cdf["timestamp"].diff().dt.total_seconds()
            dv = cdf["value"].diff()
            rate = (dv / dt * SLOT_TIME).iloc[1:]
            ts = cdf["timestamp"].iloc[1:]
            # Drop counter resets (negative rates) AND duplicate timestamps:
            # dt == 0 yields +/-inf which `rate >= 0` alone would keep,
            # producing infinite spikes in the plot.
            mask = (rate >= 0) & (dt.iloc[1:] > 0)
            rate = rate[mask]
            ts = ts[mask]
            if not rate.empty:
                fig.add_trace(
                    go.Scatter(
                        x=ts, y=rate,
                        showlegend=False,
                        line=dict(color="#636EFA"),
                    ),
                    row=row, col=col,
                )
                fig.update_yaxes(title_text="/slot", row=row, col=col)
                continue
        # Fall-through: no usable data for this client.
        fig.add_trace(
            go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
            row=row, col=col,
        )
        # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
        _n = (row - 1) * n_cols + col
        _s = "" if _n == 1 else str(_n)
        fig.add_annotation(
            text="No data available",
            xref=f"x{_s} domain", yref=f"y{_s} domain",
            x=0.5, y=0.5,
            showarrow=False,
            font=dict(size=12, color="#999"),
        )

    fig.update_layout(
        title="Signatures Aggregated per Slot by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-slot aggregation rate summary, one row per client with data.
if not agg_total_df.empty:
    records = []
    for client in all_clients:
        series = agg_total_df[agg_total_df["client"] == client].sort_values("timestamp")
        if series.empty or series["value"].max() == 0:
            continue
        elapsed = series["timestamp"].diff().dt.total_seconds()
        per_slot = (series["value"].diff() / elapsed * SLOT_TIME).iloc[1:]
        per_slot = per_slot[per_slot >= 0]  # negatives are counter resets
        if per_slot.empty:
            continue
        records.append({
            "client": client,
            "Mean (/slot)": per_slot.mean(),
            "Min (/slot)": per_slot.min(),
            "Max (/slot)": per_slot.max(),
        })
    if records:
        display(pd.DataFrame(records).set_index("client").round(3))
Mean (/slot) Min (/slot) Max (/slot)
client
ethlambda_0 2.735 1.533 5.867
ethlambda_1 0.200 0.000 0.933
ethlambda_2 0.229 0.000 0.933
ethlambda_3 0.224 0.000 0.800
ethlambda_4 0.218 0.000 0.800
gean_0 0.075 0.000 0.533
grandine_0 0.408 0.000 1.333
ream_0 0.485 0.000 8.733
zeam_0 0.779 0.000 2.200

Signatures Verified per SlotΒΆ

Rate of valid/invalid aggregated signature verifications per slot (4-second slot time). Useful for researchers and implementers with a focus on signature aggregation in conjunction with consensus.

Show code
SLOT_TIME = 4  # seconds (re-declared so this cell runs standalone)

# Calculate valid/invalid signature counts
valid_df = counts_df[counts_df["metric"] == "lean_pq_sig_aggregated_signatures_valid_total"]
invalid_df = counts_df[counts_df["metric"] == "lean_pq_sig_aggregated_signatures_invalid_total"]

if valid_df.empty and invalid_df.empty:
    print("No signature count data available")
else:
    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    # Green for valid, red for invalid verifications.
    colors = {"valid": "#2ecc71", "invalid": "#e74c3c"}
    legend_added = set()  # show each status legend entry only once across subplots

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        has_data = False

        for status, sdf in [("valid", valid_df), ("invalid", invalid_df)]:
            cdf = sdf[sdf["client"] == client].sort_values("timestamp")
            if cdf.empty or cdf["value"].max() == 0:
                continue
            # Compute rate per slot: diff(value) / diff(seconds) * slot_time
            dt = cdf["timestamp"].diff().dt.total_seconds()
            dv = cdf["value"].diff()
            rate = (dv / dt * SLOT_TIME).iloc[1:]
            ts = cdf["timestamp"].iloc[1:]
            # Drop negative rates (counter resets)
            mask = rate >= 0
            rate = rate[mask]
            ts = ts[mask]
            if rate.empty:
                continue
            has_data = True
            show_legend = status not in legend_added
            legend_added.add(status)
            fig.add_trace(
                go.Scatter(
                    x=ts, y=rate,
                    name=status, legendgroup=status,
                    showlegend=show_legend,
                    line=dict(color=colors[status]),
                ),
                row=row, col=col,
            )

        if has_data:
            fig.update_yaxes(title_text="/slot", row=row, col=col)

        else:
            # Placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
    fig.update_layout(
        title="Signatures Verified per Slot by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-slot verification rate summary, split by valid/invalid status.
if not valid_df.empty or not invalid_df.empty:
    records = []
    for client in all_clients:
        for status, sdf in (("valid", valid_df), ("invalid", invalid_df)):
            series = sdf[sdf["client"] == client].sort_values("timestamp")
            if series.empty or series["value"].max() == 0:
                continue
            elapsed = series["timestamp"].diff().dt.total_seconds()
            per_slot = (series["value"].diff() / elapsed * SLOT_TIME).iloc[1:]
            per_slot = per_slot[per_slot >= 0]  # negatives are counter resets
            if not per_slot.empty:
                records.append({
                    "client": client,
                    "status": status,
                    "Mean (/slot)": per_slot.mean(),
                    "Min (/slot)": per_slot.min(),
                    "Max (/slot)": per_slot.max(),
                })
    if records:
        display(pd.DataFrame(records).set_index(["client", "status"]).round(3))
Mean (/slot) Min (/slot) Max (/slot)
client status
ethlambda_0 valid 1.818 0.400 3.933
ethlambda_1 valid 1.818 0.400 3.933
ethlambda_2 valid 1.818 0.400 3.933
ethlambda_3 valid 1.818 0.533 4.067
ethlambda_4 valid 1.818 0.333 4.600
gean_0 valid 2.285 0.073 26.467
grandine_0 valid 3.336 0.000 26.667
lantern_0 valid 18.472 0.067 64.467
qlean_0 valid 5.409 0.000 27.200
ream_0 valid 1.801 0.000 16.200
zeam_0 valid 0.072 0.000 8.600

Signatures Aggregated per SecondΒΆ

Rate of signature aggregations per second. Useful for researchers and implementers with a focus on signature aggregation performance.

Show code
# Per-second aggregation throughput derived from the cumulative counter.
if agg_total_df.empty:
    print("No signature aggregation count data available")
else:
    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    for idx, client in enumerate(all_clients):
        r, c = divmod(idx, n_cols)
        r, c = r + 1, c + 1
        series = agg_total_df[agg_total_df["client"] == client].sort_values("timestamp")
        plotted = False
        if not series.empty and series["value"].max() > 0:
            elapsed = series["timestamp"].diff().dt.total_seconds()
            per_sec = (series["value"].diff() / elapsed).iloc[1:]
            ts = series["timestamp"].iloc[1:]
            keep = per_sec >= 0  # negatives are counter resets
            per_sec = per_sec[keep]
            ts = ts[keep]
            if not per_sec.empty:
                fig.add_trace(
                    go.Scatter(
                        x=ts, y=per_sec,
                        showlegend=False,
                        line=dict(color="#636EFA"),
                    ),
                    row=r, col=c,
                )
                fig.update_yaxes(title_text="/s", row=r, col=c)
                plotted = True
        if not plotted:
            # Empty placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=r, col=c,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            axis_num = (r - 1) * n_cols + c
            suffix = "" if axis_num == 1 else str(axis_num)
            fig.add_annotation(
                text="No data available",
                xref=f"x{suffix} domain", yref=f"y{suffix} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )

    fig.update_layout(
        title="Signatures Aggregated per Second by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-second aggregation rate summary, one row per client with data.
if not agg_total_df.empty:
    records = []
    for client in all_clients:
        series = agg_total_df[agg_total_df["client"] == client].sort_values("timestamp")
        if series.empty or series["value"].max() == 0:
            continue
        elapsed = series["timestamp"].diff().dt.total_seconds()
        per_sec = (series["value"].diff() / elapsed).iloc[1:]
        per_sec = per_sec[per_sec >= 0]  # negatives are counter resets
        if per_sec.empty:
            continue
        records.append({
            "client": client,
            "Mean (/s)": per_sec.mean(),
            "Min (/s)": per_sec.min(),
            "Max (/s)": per_sec.max(),
        })
    if records:
        display(pd.DataFrame(records).set_index("client").round(3))
Mean (/s) Min (/s) Max (/s)
client
ethlambda_0 0.684 0.383 1.467
ethlambda_1 0.050 0.000 0.233
ethlambda_2 0.057 0.000 0.233
ethlambda_3 0.056 0.000 0.200
ethlambda_4 0.055 0.000 0.200
gean_0 0.019 0.000 0.133
grandine_0 0.102 0.000 0.333
ream_0 0.121 0.000 2.183
zeam_0 0.195 0.000 0.550

Signatures Verified per SecondΒΆ

Rate of valid/invalid aggregated signature verifications per second. Useful for researchers and implementers with a focus on signature aggregation performance.

Show code
# Per-second verification throughput, split by valid/invalid status.
if valid_df.empty and invalid_df.empty:
    print("No signature count data available")
else:
    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    # Green for valid, red for invalid verifications.
    colors = {"valid": "#2ecc71", "invalid": "#e74c3c"}
    legend_added = set()  # show each status legend entry only once across subplots

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        has_data = False

        for status, sdf in [("valid", valid_df), ("invalid", invalid_df)]:
            cdf = sdf[sdf["client"] == client].sort_values("timestamp")
            if cdf.empty or cdf["value"].max() == 0:
                continue
            # Compute rate: diff(value) / diff(timestamp) in per-second
            dt = cdf["timestamp"].diff().dt.total_seconds()
            dv = cdf["value"].diff()
            rate = (dv / dt).iloc[1:]  # per second, skip first NaN
            ts = cdf["timestamp"].iloc[1:]
            # Drop negative rates (counter resets)
            mask = rate >= 0
            rate = rate[mask]
            ts = ts[mask]
            if rate.empty:
                continue
            has_data = True
            show_legend = status not in legend_added
            legend_added.add(status)
            fig.add_trace(
                go.Scatter(
                    x=ts, y=rate,
                    name=status, legendgroup=status,
                    showlegend=show_legend,
                    line=dict(color=colors[status]),
                ),
                row=row, col=col,
            )

        if has_data:
            fig.update_yaxes(title_text="/s", row=row, col=col)

        else:
            # Placeholder trace keeps the subplot position in the grid.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major; axis 1 has an empty suffix.
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
    fig.update_layout(
        title="Signatures Verified per Second by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-second verification rate summary, split by valid/invalid status.
if not valid_df.empty or not invalid_df.empty:
    records = []
    for client in all_clients:
        for status, sdf in (("valid", valid_df), ("invalid", invalid_df)):
            series = sdf[sdf["client"] == client].sort_values("timestamp")
            if series.empty or series["value"].max() == 0:
                continue
            elapsed = series["timestamp"].diff().dt.total_seconds()
            per_sec = (series["value"].diff() / elapsed).iloc[1:]
            per_sec = per_sec[per_sec >= 0]  # negatives are counter resets
            if not per_sec.empty:
                records.append({
                    "client": client,
                    "status": status,
                    "Mean (/s)": per_sec.mean(),
                    "Min (/s)": per_sec.min(),
                    "Max (/s)": per_sec.max(),
                })
    if records:
        display(pd.DataFrame(records).set_index(["client", "status"]).round(3))
Mean (/s) Min (/s) Max (/s)
client status
ethlambda_0 valid 0.454 0.100 0.983
ethlambda_1 valid 0.454 0.100 0.983
ethlambda_2 valid 0.454 0.100 0.983
ethlambda_3 valid 0.455 0.133 1.017
ethlambda_4 valid 0.455 0.083 1.150
gean_0 valid 0.571 0.018 6.617
grandine_0 valid 0.834 0.000 6.667
lantern_0 valid 4.618 0.017 16.117
qlean_0 valid 1.352 0.000 6.800
ream_0 valid 0.450 0.000 4.050
zeam_0 valid 0.018 0.000 2.150

SummaryΒΆ

Key findings from this devnet iteration:

Show code
# Headline figures for the whole devnet run.
print(f"Devnet: {devnet_id}")
print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
print(f"Clients analyzed: {len(timing_df['client'].unique())}")
print()

if not agg_build_df.empty:
    mean_p95_build = agg_build_df.loc[agg_build_df["quantile"] == 0.95, "value_ms"].mean()
    print(f"Average P95 aggregation time: {mean_p95_build:.2f} ms")

if not agg_ver_df.empty:
    mean_p95_verify = agg_ver_df.loc[agg_ver_df["quantile"] == 0.95, "value_ms"].mean()
    print(f"Average P95 proof verification time: {mean_p95_verify:.2f} ms")
Devnet: pqdevnet-20260326T0346Z
Duration: 16.3 hours
Clients analyzed: 12

Average P95 proof verification time: 111.24 ms