PQ Signature Performance

Analysis of post-quantum cryptographic signature performance in Lean Consensus clients.

This notebook examines:

  • Attestation signing time (p50, p95, p99)
  • Attestation verification time
  • Signature counts (total, valid, invalid)
  • Performance comparison across clients
Show code
import json
from pathlib import Path

import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Set default renderer for static HTML output
import plotly.io as pio
pio.renderers.default = "notebook"
Show code
# Resolve devnet_id.
#
# devnet_id may be injected by an upstream parameters cell (papermill-style).
# Guard with globals() so a fresh kernel without that cell falls back to the
# manifest instead of raising NameError.
DATA_DIR = Path("../data")

devnet_id = globals().get("devnet_id", None)

if devnet_id is None:
    # Fall back to the most recent devnet listed in the manifest.
    devnets_path = DATA_DIR / "devnets.json"
    if not devnets_path.exists():
        raise ValueError("No devnets.json found. Run 'just detect-devnets' first.")
    with open(devnets_path) as f:
        devnets = json.load(f).get("devnets", [])
    if not devnets:
        # Previously this fell through silently and `DATA_DIR / None` raised a
        # confusing TypeError below — fail fast with a clear message instead.
        raise ValueError(f"devnets.json lists no devnets: {devnets_path}")
    devnet_id = devnets[-1]["id"]  # Latest
    print(f"Using latest devnet: {devnet_id}")

DEVNET_DIR = DATA_DIR / devnet_id
print(f"Loading data from: {DEVNET_DIR}")
Loading data from: ../data/pqdevnet-20260324T0920Z
Show code
# Load devnet metadata and fail fast if the resolved devnet_id is unknown.
with open(DATA_DIR / "devnets.json") as f:
    devnets_data = json.load(f)
    devnet_info = next((d for d in devnets_data["devnets"] if d["id"] == devnet_id), None)

if devnet_info is None:
    # Later cells index devnet_info directly (e.g. devnet_info["clients"]);
    # raising here gives a clear error instead of a TypeError further down.
    raise ValueError(f"Devnet '{devnet_id}' not found in devnets.json")

print(f"Devnet: {devnet_info['id']}")
print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
print(f"Time: {devnet_info['start_time']} to {devnet_info['end_time']}")
# The separator was dropped at some point (rendered output shows
# "Slots: 0 → 125"); restore the arrow between start and end slot.
print(f"Slots: {devnet_info['start_slot']} → {devnet_info['end_slot']}")
print(f"Clients: {', '.join(devnet_info['clients'])}")
Devnet: pqdevnet-20260324T0920Z
Duration: 0.2 hours
Time: 2026-03-24T09:20:42+00:00 to 2026-03-24T09:31:41+00:00
Slots: 0 → 125
Clients: ethlambda_0, ethlambda_1, ethlambda_2, gean_0, grandine_0, nlean_0, nlean_1, qlean_0, qlean_1, zeam_0

Load Data

Show code
# Load PQ signature timing samples (per-client quantile series).
timing_path = DEVNET_DIR / "pq_signature_timing.parquet"
timing_df = pd.read_parquet(timing_path)

print(f"Loaded {len(timing_df)} timing records")
print(f"Metrics: {timing_df['metric'].unique().tolist()}")
print(f"Clients: {timing_df['client'].unique().tolist()}")
Loaded 198 timing records
Metrics: ['signing', 'verification', 'agg_verification']
Clients: ['ethlambda_0', 'ethlambda_1', 'ethlambda_2', 'gean_0', 'grandine_0', 'nlean_0', 'nlean_1', 'qlean_0', 'qlean_1', 'zeam_0']
Show code
# Load cumulative PQ signature counters.
counts_df = pd.read_parquet(DEVNET_DIR / "pq_signature_metrics.parquet")
print(f"Loaded {len(counts_df)} count records")
print(f"Metrics: {counts_df['metric'].unique().tolist()}")

# Unified client list from devnet metadata (includes all containers via cAdvisor)
all_clients = sorted(devnet_info["clients"])
# Clamp to at least 1 column: min(0, 2) == 0 for an empty client list would
# make the ceiling division below raise ZeroDivisionError.
n_cols = max(1, min(len(all_clients), 2))
n_rows = -(-len(all_clients) // n_cols)  # ceiling division
print(f"\nAll clients ({len(all_clients)}): {all_clients}")
Loaded 383 count records
Metrics: ['lean_pq_sig_aggregated_signatures_valid_total', 'lean_pq_sig_aggregated_signatures_invalid_total', 'lean_pq_sig_aggregated_signatures_total', 'lean_pq_sig_attestations_in_aggregated_signatures_total']

All clients (10): ['ethlambda_0', 'ethlambda_1', 'ethlambda_2', 'gean_0', 'grandine_0', 'nlean_0', 'nlean_1', 'qlean_0', 'qlean_1', 'zeam_0']

Attestation Signature Counts

Total aggregated signatures produced by each client, broken down by validation result.

Show code
# Cumulative signature counters per client, one subplot each.
# These are cumulative Prometheus counters — plotting the raw series already
# shows growth over time, so no rate() transform is applied.
from IPython.display import HTML, display

count_metrics = {
    "lean_pq_sig_aggregated_signatures_total": ("Total", "#636EFA"),
    "lean_pq_sig_aggregated_signatures_valid_total": ("Valid", "#00CC96"),
    "lean_pq_sig_aggregated_signatures_invalid_total": ("Invalid", "#EF553B"),
}

if counts_df.empty:
    print("No signature count data available")
else:
    sig_df = counts_df[counts_df["metric"].isin(count_metrics)].copy()

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    seen_labels = set()

    for idx, client in enumerate(all_clients):
        r, c = divmod(idx, n_cols)
        r, c = r + 1, c + 1
        client_df = sig_df[sig_df["client"] == client]

        if client_df.empty:
            # Invisible trace keeps the subplot axes allocated so the
            # "No data" annotation has a domain to anchor to.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo="skip"),
                row=r, col=c,
            )
            # Plotly numbers subplot axes row-major: x, x2, x3, ...
            axis_num = (r - 1) * n_cols + c
            suffix = "" if axis_num == 1 else str(axis_num)
            fig.add_annotation(
                text="No data available",
                xref=f"x{suffix} domain", yref=f"y{suffix} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
            continue

        for metric_name, (label, color) in count_metrics.items():
            metric_df = client_df[client_df["metric"] == metric_name].sort_values("timestamp")
            if metric_df.empty:
                continue
            # Show each legend entry once, shared across subplots.
            fig.add_trace(
                go.Scatter(
                    x=metric_df["timestamp"], y=metric_df["value"],
                    name=label, legendgroup=label,
                    showlegend=label not in seen_labels,
                    line=dict(color=color),
                ),
                row=r, col=c,
            )
            seen_labels.add(label)
        fig.update_yaxes(title_text="count", row=r, col=c)

    fig.update_layout(
        title="Cumulative Attestation Signature Counts by Client",
        height=270 * n_rows,
    )
    fig.show()

Attestation Signing Time

How long does it take to sign an attestation using post-quantum cryptography?

Show code
# Filter to signing time metric and plot per-client quantile series.
signing_df = timing_df[timing_df["metric"] == "signing"].copy()

if signing_df.empty:
    print("No signing time data available")
else:
    # Convert seconds to milliseconds for readability
    signing_df["value_ms"] = signing_df["value"] * 1000

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    colors = {"0.5": "#636EFA", "0.95": "#EF553B", "0.99": "#00CC96"}
    legend_added = set()

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        cdf = signing_df[signing_df["client"] == client]
        if not cdf.empty:
            for q in sorted(cdf["quantile"].unique()):
                qdf = cdf[cdf["quantile"] == q].sort_values("timestamp")
                q_str = str(q)
                # BUG FIX: int() truncates toward zero and 0.95 * 100 is
                # 94.999... in binary float, so the p95 trace was labeled
                # "p94". round() yields the intended percentile label.
                label = f"p{round(q * 100)}"
                show_legend = q_str not in legend_added
                legend_added.add(q_str)
                fig.add_trace(
                    go.Scatter(
                        x=qdf["timestamp"], y=qdf["value_ms"],
                        name=label, legendgroup=q_str,
                        showlegend=show_legend,
                        line=dict(color=colors.get(q_str, "#AB63FA")),
                    ),
                    row=row, col=col,
                )
            fig.update_yaxes(title_text="ms", row=row, col=col)
        else:
            # Invisible trace keeps the subplot axes allocated for the
            # "No data" annotation below.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major: x, x2, x3, ...
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
    fig.update_layout(
        title="Attestation Signing Time by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-client signing-time summary: mean/min/max of each quantile series (ms).
if not signing_df.empty:
    summary = (
        signing_df
        .groupby(["client", "quantile"])["value_ms"]
        .agg(["mean", "min", "max"])
        .round(3)
    )
    summary.columns = ["Mean (ms)", "Min (ms)", "Max (ms)"]
    display(summary)
Mean (ms) Min (ms) Max (ms)
client quantile
ethlambda_0 0.50 2.567 2.537 2.596
0.95 4.877 4.821 4.932
0.99 7.624 6.600 8.647
ethlambda_1 0.50 2.522 2.500 2.544
0.95 4.791 4.750 4.833
0.99 10.607 4.950 16.264
ethlambda_2 0.50 2.565 2.535 2.596
0.95 4.874 4.817 4.932
0.99 11.326 6.400 16.252
gean_0 0.50 2.917 2.917 2.917
0.95 14.500 14.500 14.500
0.99 22.900 22.900 22.900
grandine_0 0.50 7.297 7.093 7.500
0.95 19.696 18.143 21.250
0.99 23.939 23.629 24.250
nlean_0 0.50 14.696 14.632 14.760
0.95 24.168 23.976 24.360
0.99 31.273 24.795 37.750
nlean_1 0.50 15.103 14.714 15.491
0.95 24.203 24.049 24.357
0.99 31.155 24.810 37.500
qlean_0 0.50 2.588 2.572 2.604
0.95 4.918 4.888 4.948
0.99 8.488 8.225 8.750
qlean_1 0.50 2.536 2.500 2.572
0.95 4.819 4.750 4.888
0.99 6.588 4.950 8.225
zeam_0 0.50 2.552 2.500 2.604
0.95 4.849 4.750 4.948
0.99 6.850 4.950 8.750

Attestation Verification Time

How long does it take to verify an attestation signature?

Show code
# Filter to verification time metric and plot per-client quantile series.
verification_df = timing_df[timing_df["metric"] == "verification"].copy()

if verification_df.empty:
    print("No verification time data available")
else:
    # Convert seconds to milliseconds for readability
    verification_df["value_ms"] = verification_df["value"] * 1000

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    colors = {"0.5": "#636EFA", "0.95": "#EF553B", "0.99": "#00CC96"}
    legend_added = set()

    for i, client in enumerate(all_clients):
        row = i // n_cols + 1
        col = i % n_cols + 1
        cdf = verification_df[verification_df["client"] == client]
        if not cdf.empty:
            for q in sorted(cdf["quantile"].unique()):
                qdf = cdf[cdf["quantile"] == q].sort_values("timestamp")
                q_str = str(q)
                # BUG FIX: int() truncates toward zero and 0.95 * 100 is
                # 94.999... in binary float, so the p95 trace was labeled
                # "p94". round() yields the intended percentile label.
                label = f"p{round(q * 100)}"
                show_legend = q_str not in legend_added
                legend_added.add(q_str)
                fig.add_trace(
                    go.Scatter(
                        x=qdf["timestamp"], y=qdf["value_ms"],
                        name=label, legendgroup=q_str,
                        showlegend=show_legend,
                        line=dict(color=colors.get(q_str, "#AB63FA")),
                    ),
                    row=row, col=col,
                )
            fig.update_yaxes(title_text="ms", row=row, col=col)
        else:
            # Invisible trace keeps the subplot axes allocated for the
            # "No data" annotation below.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=row, col=col,
            )
            # Plotly numbers subplot axes row-major: x, x2, x3, ...
            _n = (row - 1) * n_cols + col
            _s = "" if _n == 1 else str(_n)
            fig.add_annotation(
                text="No data available",
                xref=f"x{_s} domain", yref=f"y{_s} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
    fig.update_layout(
        title="Attestation Verification Time by Client",
        height=270 * n_rows,
    )
    fig.show()
Show code
# Per-client verification-time summary: mean/min/max of each quantile series (ms).
if not verification_df.empty:
    summary = (
        verification_df
        .groupby(["client", "quantile"])["value_ms"]
        .agg(["mean", "min", "max"])
        .round(3)
    )
    summary.columns = ["Mean (ms)", "Min (ms)", "Max (ms)"]
    display(summary)
Mean (ms) Min (ms) Max (ms)
client quantile
gean_0 0.50 2.500 2.50 2.500
0.95 4.750 4.75 4.750
0.99 4.950 4.95 4.950
grandine_0 0.50 NaN NaN NaN
0.95 NaN NaN NaN
0.99 NaN NaN NaN
nlean_0 0.50 2.500 2.50 2.500
0.95 4.750 4.75 4.750
0.99 4.950 4.95 4.950
nlean_1 0.50 2.500 2.50 2.500
0.95 4.750 4.75 4.750
0.99 4.950 4.95 4.950
qlean_0 0.50 2.502 2.50 2.504
0.95 4.754 4.75 4.758
0.99 4.954 4.95 4.959
qlean_1 0.50 2.502 2.50 2.504
0.95 4.754 4.75 4.758
0.99 4.954 4.95 4.959
zeam_0 0.50 2.503 2.50 2.505
0.95 4.755 4.75 4.760
0.99 4.955 4.95 4.960

Summary

Key findings from this devnet iteration:

Show code
# Roll up headline numbers for the whole devnet run.
print(f"Devnet: {devnet_id}")
print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
print(f"Clients analyzed: {len(timing_df['client'].unique())}")
print()

if not signing_df.empty:
    signing_p95 = signing_df.loc[signing_df["quantile"] == 0.95, "value_ms"].mean()
    print(f"Average P95 signing time: {signing_p95:.2f} ms")

if not verification_df.empty:
    verification_p95 = verification_df.loc[verification_df["quantile"] == 0.95, "value_ms"].mean()
    print(f"Average P95 verification time: {verification_p95:.2f} ms")

if not counts_df.empty:
    def _counter_total(metric: str) -> int:
        # Counters are cumulative, so each client's final value is its max;
        # summing those maxima gives the network-wide total.
        per_client = counts_df[counts_df["metric"] == metric].groupby("client")["value"].max()
        return int(per_client.sum())

    print(f"\nTotal signatures: {_counter_total('lean_pq_sig_aggregated_signatures_total'):,}")
    print(f"Valid signatures: {_counter_total('lean_pq_sig_aggregated_signatures_valid_total'):,}")
    print(f"Invalid signatures: {_counter_total('lean_pq_sig_aggregated_signatures_invalid_total'):,}")
Devnet: pqdevnet-20260324T0920Z
Duration: 0.2 hours
Clients analyzed: 10

Average P95 signing time: 10.99 ms
Average P95 verification time: 4.75 ms

Total signatures: 755
Valid signatures: 5,736
Invalid signatures: 0