State Transitions

State Transitions

State transition timing analysis for PQ Devnet clients.

This notebook examines processing time percentiles (p50, p95, p99) for:

  • Total state transition time
  • Slot processing
  • Block processing
  • Attestation processing
  • Fork choice block processing
  • Attestation validation
Show code
# Notebook setup: stdlib + analysis/plotting imports.
import json
from pathlib import Path

import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IPython.display import display

# Set default renderer for static HTML output
# ("notebook" embeds plotly.js so figures survive static export).
import plotly.io as pio
pio.renderers.default = "notebook"
Show code
# Resolve devnet_id
DATA_DIR = Path("../data")

# `devnet_id` is injected upstream (e.g. as a papermill parameter); when it is
# None we fall back to the most recent entry recorded in devnets.json.
if devnet_id is None:
    devnets_path = DATA_DIR / "devnets.json"
    if not devnets_path.exists():
        raise ValueError("No devnets.json found. Run 'just detect-devnets' first.")
    with open(devnets_path) as f:
        devnets = json.load(f).get("devnets", [])
    if devnets:
        devnet_id = devnets[-1]["id"]
        print(f"Using latest devnet: {devnet_id}")

# Fail fast with a clear message instead of the opaque TypeError that
# `DATA_DIR / None` would raise when devnets.json lists no devnets.
if devnet_id is None:
    raise ValueError("devnets.json contains no devnets; cannot resolve devnet_id.")

DEVNET_DIR = DATA_DIR / devnet_id
print(f"Loading data from: {DEVNET_DIR}")
Loading data from: ../data/pqdevnet-20260202T1348Z
Show code
# Load devnet metadata and report the basic facts of this run.
with open(DATA_DIR / "devnets.json") as f:
    devnets_data = json.load(f)
    # First entry whose id matches; None when the id is unknown.
    devnet_info = None
    for entry in devnets_data["devnets"]:
        if entry["id"] == devnet_id:
            devnet_info = entry
            break

if devnet_info:
    print(f"Devnet: {devnet_info['id']}")
    print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
    print(f"Time: {devnet_info['start_time']} to {devnet_info['end_time']}")
    print(f"Slots: {devnet_info['start_slot']} \u2192 {devnet_info['end_slot']}")
    print(f"Clients: {', '.join(devnet_info['clients'])}")
Devnet: pqdevnet-20260202T1348Z
Duration: 2.1 hours
Time: 2026-02-02T13:48:46+00:00 to 2026-02-02T15:52:45+00:00
Slots: 0 → 57977
Clients: ethlambda, grandine, lantern, qlean, ream, zeam

Load Data

Show code
# Load state transition timing data
timing_df = pd.read_parquet(DEVNET_DIR / "state_transition_timing.parquet")

# Filter out NaN/Inf values from histogram_quantile.
# Use abs() so both +inf and -inf are dropped, matching the stated intent
# (the original check only excluded +inf).
timing_df = timing_df[
    timing_df["value"].notna() & (timing_df["value"].abs() != float("inf"))
]

# Deduplicate: keep the max observation per (client, metric, quantile, timestamp)
timing_df = timing_df.groupby(["client", "metric", "quantile", "timestamp"], as_index=False)["value"].max()

# Convert to milliseconds
timing_df["value_ms"] = timing_df["value"] * 1000

print(f"Loaded {len(timing_df)} records")
print(f"Metrics: {sorted(timing_df['metric'].unique())}")
print(f"Quantiles: {sorted(timing_df['quantile'].unique())}")
print(f"Clients: {sorted(timing_df['client'].unique())}")

# Unified client list from devnet metadata (includes all containers via cAdvisor)
all_clients = sorted(devnet_info["clients"])
# Subplot grid: up to 2 columns, ceil(len/cols) rows.
n_cols = min(len(all_clients), 2)
n_rows = -(-len(all_clients) // n_cols)
Loaded 1656 records
Metrics: ['attestation_validation', 'attestations', 'block', 'fork_choice', 'slots', 'total']
Quantiles: [np.float64(0.5), np.float64(0.95), np.float64(0.99)]
Clients: ['ethlambda', 'lantern', 'qlean', 'ream', 'zeam']

Total State Transition Time

End-to-end time for the full state transition, from lean_state_transition_time_seconds.

Show code
def plot_metric(df, metric_name, title, ylabel="ms"):
    """Render a per-client subplot grid of quantile time series for one metric.

    Relies on the notebook-level layout globals ``all_clients``, ``n_rows``
    and ``n_cols``. Clients with no rows for the metric get an invisible
    placeholder trace plus a "No data available" annotation.
    """
    metric_df = df[df["metric"] == metric_name]

    fig = make_subplots(
        rows=n_rows, cols=n_cols,
        subplot_titles=all_clients,
        vertical_spacing=0.12 / max(n_rows - 1, 1) * 2,
        horizontal_spacing=0.08,
    )

    quantile_colors = {0.5: "#636EFA", 0.95: "#EF553B", 0.99: "#00CC96"}
    # Quantiles already shown in the legend — each appears exactly once.
    seen_quantiles = set()

    for idx, client in enumerate(all_clients):
        grid_row, grid_col = divmod(idx, n_cols)
        grid_row += 1
        grid_col += 1
        client_df = metric_df[metric_df["client"] == client]

        if client_df.empty:
            # Placeholder trace keeps the subplot's axes in place.
            fig.add_trace(
                go.Scatter(x=[None], y=[None], showlegend=False, hoverinfo='skip'),
                row=grid_row, col=grid_col,
            )
            # Plotly names the first subplot's axes "x"/"y", later ones "x2"...
            axis_num = (grid_row - 1) * n_cols + grid_col
            suffix = "" if axis_num == 1 else str(axis_num)
            fig.add_annotation(
                text="No data available",
                xref=f"x{suffix} domain", yref=f"y{suffix} domain",
                x=0.5, y=0.5,
                showarrow=False,
                font=dict(size=12, color="#999"),
            )
        else:
            for q in sorted(client_df["quantile"].unique()):
                series = client_df[client_df["quantile"] == q].sort_values("timestamp")
                fig.add_trace(
                    go.Scatter(
                        x=series["timestamp"], y=series["value_ms"],
                        name=f"p{int(q * 100)}", legendgroup=str(q),
                        showlegend=q not in seen_quantiles,
                        line=dict(color=quantile_colors.get(q, "#AB63FA")),
                    ),
                    row=grid_row, col=grid_col,
                )
                seen_quantiles.add(q)

        fig.update_yaxes(title_text=ylabel, row=grid_row, col=grid_col)

    fig.update_layout(title=title, height=270 * n_rows)
    fig.show()

plot_metric(timing_df, "total", "Total State Transition Time")

Slot Processing Time

Time spent processing slots during state transition, from lean_state_transition_slots_processing_time_seconds.

Show code
# Per-client p50/p95/p99 time series for slot processing.
plot_metric(timing_df, "slots", "Slot Processing Time")

Block Processing Time

Time spent processing blocks during state transition, from lean_state_transition_block_processing_time_seconds.

Show code
# Per-client p50/p95/p99 time series for block processing.
plot_metric(timing_df, "block", "Block Processing Time")

Attestation Processing Time

Time spent processing attestations during state transition, from lean_state_transition_attestations_processing_time_seconds.

Show code
# Per-client p50/p95/p99 time series for attestation processing.
plot_metric(timing_df, "attestations", "Attestation Processing Time")

Fork Choice Block Processing Time

Time spent in fork choice during block processing, from lean_fork_choice_block_processing_time_seconds.

Show code
# Per-client p50/p95/p99 time series for fork choice block processing.
plot_metric(timing_df, "fork_choice", "Fork Choice Block Processing Time")

Attestation Validation Time

Time spent validating individual attestations, from lean_attestation_validation_time_seconds.

Show code
# Per-client p50/p95/p99 time series for attestation validation.
plot_metric(timing_df, "attestation_validation", "Attestation Validation Time")

Summary

Show code
# Summary: average p50 and p95 per client per metric
metric_labels = {
    "total": "Total",
    "slots": "Slots",
    "block": "Block",
    "attestations": "Attestations",
    "fork_choice": "Fork Choice",
    "attestation_validation": "Att. Validation",
}

summary_rows = []
for client in all_clients:
    row = {"Client": client}
    cdf = timing_df[timing_df["client"] == client]
    for metric_key, metric_label in metric_labels.items():
        mdf = cdf[cdf["metric"] == metric_key]
        p50 = mdf[mdf["quantile"] == 0.5]["value_ms"]
        p95 = mdf[mdf["quantile"] == 0.95]["value_ms"]
        if not p50.empty:
            row[f"{metric_label} p50 (ms)"] = f"{p50.mean():.2f}"
        if not p95.empty:
            row[f"{metric_label} p95 (ms)"] = f"{p95.mean():.2f}"
    summary_rows.append(row)

if summary_rows:
    summary_df = pd.DataFrame(summary_rows).set_index("Client").fillna("-")
    display(summary_df)

print(f"\nDevnet: {devnet_id}")
if devnet_info:
    print(f"Duration: {devnet_info['duration_hours']:.1f} hours")
Total p50 (ms) Total p95 (ms) Slots p50 (ms) Slots p95 (ms) Block p50 (ms) Block p95 (ms) Attestations p50 (ms) Attestations p95 (ms) Fork Choice p50 (ms) Fork Choice p95 (ms) Att. Validation p50 (ms) Att. Validation p95 (ms)
Client
ethlambda 125.00 237.50 2.50 4.75 2.52 4.89 2.51 4.84 498.35 932.06 2.50 4.75
grandine - - - - - - - - - - - -
lantern 125.00 237.50 2.50 4.75 2.50 4.75 2.50 4.75 2.50 4.75 2.50 4.75
qlean 125.00 237.50 9.76 15.80 18.38 27.21 16.97 26.74 457.09 947.78 2.50 4.76
ream 2.50 4.75 2.50 4.75 2.50 4.75 2.50 4.75 298.79 1030.90 2.50 4.75
zeam 125.00 237.50 10.52 21.15 2.50 4.75 2.50 4.75 2.50 4.75 2.50 4.75
Devnet: pqdevnet-20260202T1348Z
Duration: 2.1 hours