// arb_rpc/arbdebug.rs

//! `arbdebug_*` namespace — historical pricing + retryable queue
//! introspection. Samples ArbOS state at each block in the requested
//! range.

5use std::sync::Arc;
6
7use alloy_consensus::BlockHeader;
8use alloy_primitives::{Address, StorageKey, B256, U256};
9use alloy_rpc_types_eth::BlockNumberOrTag;
10use arb_precompiles::storage_slot::{
11    derive_subspace_key, map_slot, subspace_slot, ARBOS_STATE_ADDRESS, L1_PRICING_SUBSPACE,
12    L2_PRICING_SUBSPACE, RETRYABLES_SUBSPACE, ROOT_STORAGE_KEY,
13};
14use arbos::retryables::{TIMEOUT_OFFSET, TIMEOUT_QUEUE_KEY};
15use jsonrpsee::{
16    core::RpcResult,
17    proc_macros::rpc,
18    types::{error::INTERNAL_ERROR_CODE, ErrorObject},
19};
20use reth_provider::{BlockReaderIdExt, ReceiptProvider, StateProviderFactory};
21use serde::{Deserialize, Serialize};
22
// Field offsets mirror Nitro's storage layout (arbos/l1_pricing,
// arbos/l2_pricing, arbos/retryables). Each offset indexes a 32-byte
// word inside its pricing subspace and is turned into a concrete
// storage slot via `subspace_slot` (see `read_l1_field`/`read_l2_field`).

// L1 pricing state, rooted at `L1_PRICING_SUBSPACE`.
const L1_PAY_REWARDS_TO_OFFSET: u64 = 0;
const L1_EQUILIBRATION_UNITS_OFFSET: u64 = 1;
const L1_INERTIA_OFFSET: u64 = 2;
const L1_PER_UNIT_REWARD_OFFSET: u64 = 3;
const L1_LAST_UPDATE_TIME_OFFSET: u64 = 4;
const L1_FUNDS_DUE_FOR_REWARDS_OFFSET: u64 = 5;
const L1_UNITS_SINCE_UPDATE_OFFSET: u64 = 6;
const L1_PRICE_PER_UNIT_OFFSET: u64 = 7;
const L1_LAST_SURPLUS_OFFSET: u64 = 8;
const L1_PER_BATCH_GAS_COST_OFFSET: u64 = 9;
const L1_AMORTIZED_COST_CAP_BIPS_OFFSET: u64 = 10;
const L1_L1_FEES_AVAILABLE_OFFSET: u64 = 11;

// L2 gas pricing state, rooted at `L2_PRICING_SUBSPACE`.
const L2_SPEED_LIMIT_OFFSET: u64 = 0;
const L2_PER_BLOCK_GAS_LIMIT_OFFSET: u64 = 1;
const L2_BASE_FEE_OFFSET: u64 = 2;
const L2_MIN_BASE_FEE_OFFSET: u64 = 3;
const L2_GAS_BACKLOG_OFFSET: u64 = 4;
const L2_PRICING_INERTIA_OFFSET: u64 = 5;
const L2_BACKLOG_TOLERANCE_OFFSET: u64 = 6;
45
/// Response of `arbdebug_pricingModel`.
///
/// The `Vec` fields are parallel arrays — index `i` of each describes the
/// `i`-th sampled block. Scalar fields are read once at block `end`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PricingModelHistory {
    /// First block of the requested range.
    pub start: u64,
    /// Last block of the requested range.
    pub end: u64,
    /// Stride between consecutive samples (1 when the range fits the bound).
    pub step: u64,
    /// Header timestamp of each sampled block.
    pub timestamp: Vec<u64>,
    pub base_fee: Vec<U256>,
    pub gas_backlog: Vec<u64>,
    /// Total gas used in each sampled block (from the last receipt).
    pub gas_used: Vec<u64>,
    pub min_base_fee: U256,
    pub speed_limit: u64,
    pub per_block_gas_limit: u64,
    pub per_tx_gas_limit: u64,
    pub pricing_inertia: u64,
    pub backlog_tolerance: u64,
    pub l1_base_fee_estimate: Vec<U256>,
    pub l1_last_surplus: Vec<U256>,
    pub l1_funds_due: Vec<U256>,
    pub l1_funds_due_for_rewards: Vec<U256>,
    pub l1_units_since_update: Vec<u64>,
    pub l1_last_update_time: Vec<u64>,
    pub l1_equilibration_units: U256,
    pub l1_per_batch_cost: i64,
    pub l1_amortized_cost_cap_bips: u64,
    pub l1_pricing_inertia: u64,
    pub l1_per_unit_reward: u64,
    pub l1_pay_reward_to: Address,
}
75
/// Response of `arbdebug_timeoutQueueHistory`: the retryable timeout
/// queue's size sampled across a block range. `timestamp[i]` and
/// `size[i]` describe the same sampled block.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TimeoutQueueHistory {
    pub start: u64,
    pub end: u64,
    pub step: u64,
    pub timestamp: Vec<u64>,
    pub size: Vec<u64>,
}
85
/// Response of `arbdebug_timeoutQueue`: the pending retryables at one
/// block. `tickets[i]` times out at `timeouts[i]` (parallel arrays).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TimeoutQueue {
    pub block_number: u64,
    pub tickets: Vec<B256>,
    pub timeouts: Vec<u64>,
}
93
/// JSON-RPC surface of the `arbdebug` namespace.
#[rpc(server, namespace = "arbdebug")]
pub trait ArbDebugApi {
    /// Sampled L2/L1 pricing state over blocks `[start, end]` inclusive.
    #[method(name = "pricingModel")]
    async fn pricing_model(&self, start: u64, end: u64) -> RpcResult<PricingModelHistory>;

    /// Sampled retryable timeout-queue sizes over blocks `[start, end]`.
    #[method(name = "timeoutQueueHistory")]
    async fn timeout_queue_history(&self, start: u64, end: u64) -> RpcResult<TimeoutQueueHistory>;

    /// Pending retryables in the timeout queue at a single block.
    #[method(name = "timeoutQueue")]
    async fn timeout_queue(&self, block_num: u64) -> RpcResult<TimeoutQueue>;
}
105
/// Tunables for the `arbdebug` namespace.
#[derive(Debug, Clone)]
pub struct ArbDebugConfig {
    /// Max samples per query. Zero disables arbdebug.
    pub block_range_bound: u64,
    /// Max tickets returned from `timeoutQueue`.
    pub timeout_queue_bound: u64,
}
113
114impl Default for ArbDebugConfig {
115    fn default() -> Self {
116        Self {
117            block_range_bound: 256,
118            timeout_queue_bound: 256,
119        }
120    }
121}
122
/// Server-side implementation of the `arbdebug` RPC namespace.
pub struct ArbDebugHandler<Provider> {
    // Chain/state access; concrete trait bounds live on the impl blocks.
    provider: Provider,
    // Shared so clones of the handler observe the same configuration.
    config: Arc<ArbDebugConfig>,
}
127
128impl<Provider: std::fmt::Debug> std::fmt::Debug for ArbDebugHandler<Provider> {
129    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
130        f.debug_struct("ArbDebugHandler")
131            .field("config", &self.config)
132            .finish_non_exhaustive()
133    }
134}
135
136impl<Provider: Clone> Clone for ArbDebugHandler<Provider> {
137    fn clone(&self) -> Self {
138        Self {
139            provider: self.provider.clone(),
140            config: self.config.clone(),
141        }
142    }
143}
144
145impl<Provider> ArbDebugHandler<Provider> {
146    pub fn new(provider: Provider, config: ArbDebugConfig) -> Self {
147        Self {
148            provider,
149            config: Arc::new(config),
150        }
151    }
152}
153
154fn internal_err(msg: impl std::fmt::Display) -> ErrorObject<'static> {
155    ErrorObject::owned(INTERNAL_ERROR_CODE, msg.to_string(), None::<()>)
156}
157
/// Sample step-size: matches Nitro's `evenlySpaceBlocks` — if the range
/// exceeds `bound` blocks, step > 1 so the total number of samples stays
/// ≤ bound.
///
/// Returns `(first, step, samples)`: the first sampled block, the stride
/// between samples, and the sample count. The final sample is always
/// anchored at `end`; when `bound` is 0 no samples are produced.
fn compute_step(start: u64, end: u64, bound: u64) -> (u64, u64, u64) {
    // Inclusive width of the range (≥ 1 whenever start ≤ end).
    let span = end.saturating_sub(start).saturating_add(1);
    if bound == 0 || span == 0 {
        return (start, 1, 0);
    }
    // Stride of 1 while everything fits; otherwise thin to ≤ bound samples.
    let step = match span > bound {
        true => span.div_ceil(bound),
        false => 1,
    };
    let samples = span.div_ceil(step).min(bound);
    // Walk backwards from `end` to locate the first sampled block.
    let first = end.saturating_sub(step.saturating_mul(samples.saturating_sub(1)));
    (first, step, samples)
}
175
176impl<Provider> ArbDebugHandler<Provider>
177where
178    Provider: StateProviderFactory + BlockReaderIdExt + ReceiptProvider + Clone + 'static,
179{
180    /// Total gas consumed in the given block, summed from the last
181    /// receipt's `cumulative_gas_used`.
182    fn block_gas_used(&self, block: u64) -> Result<u64, ErrorObject<'static>> {
183        use alloy_consensus::TxReceipt;
184        let receipts = self
185            .provider
186            .receipts_by_block(alloy_eips::BlockHashOrNumber::Number(block))
187            .map_err(internal_err)?
188            .unwrap_or_default();
189        Ok(receipts
190            .last()
191            .map(|r| r.cumulative_gas_used())
192            .unwrap_or(0))
193    }
194
195    fn check_enabled(&self) -> Result<(), ErrorObject<'static>> {
196        if self.config.block_range_bound == 0 {
197            return Err(internal_err("arbdebug disabled (block_range_bound = 0)"));
198        }
199        Ok(())
200    }
201
202    fn validate_range(&self, start: u64, end: u64) -> Result<(), ErrorObject<'static>> {
203        if start > end {
204            return Err(internal_err(format!(
205                "invalid range: start {start} > end {end}"
206            )));
207        }
208        Ok(())
209    }
210
211    fn header_timestamp(&self, block: u64) -> Result<u64, ErrorObject<'static>> {
212        let header = self
213            .provider
214            .sealed_header_by_number_or_tag(BlockNumberOrTag::Number(block))
215            .map_err(internal_err)?
216            .ok_or_else(|| internal_err(format!("block {block} not found")))?;
217        Ok(header.timestamp())
218    }
219
220    fn read_slot(&self, block: u64, slot: U256) -> Result<U256, ErrorObject<'static>> {
221        let state = self
222            .provider
223            .state_by_block_id(BlockNumberOrTag::Number(block).into())
224            .map_err(internal_err)?;
225        let k = StorageKey::from(B256::from(slot.to_be_bytes::<32>()));
226        Ok(state
227            .storage(ARBOS_STATE_ADDRESS, k)
228            .map_err(internal_err)?
229            .unwrap_or(U256::ZERO))
230    }
231
232    fn read_l1_field(&self, block: u64, offset: u64) -> Result<U256, ErrorObject<'static>> {
233        self.read_slot(block, subspace_slot(L1_PRICING_SUBSPACE, offset))
234    }
235
236    fn read_l2_field(&self, block: u64, offset: u64) -> Result<U256, ErrorObject<'static>> {
237        self.read_slot(block, subspace_slot(L2_PRICING_SUBSPACE, offset))
238    }
239
240    /// Storage key for the retryable timeout queue's body.
241    fn retryable_queue_storage_key() -> B256 {
242        let retryables = derive_subspace_key(ROOT_STORAGE_KEY, RETRYABLES_SUBSPACE);
243        derive_subspace_key(retryables.as_slice(), TIMEOUT_QUEUE_KEY)
244    }
245
246    /// Size of the timeout queue at the given block.
247    fn queue_size_at(&self, block: u64) -> Result<u64, ErrorObject<'static>> {
248        // Queue layout: the queue's own storage has offset-0 = next_put,
249        // offset-1 = next_get. `size = next_put - next_get`.
250        let qk = Self::retryable_queue_storage_key();
251        let put = self
252            .read_slot(block, map_slot(qk.as_slice(), 0))?
253            .try_into()
254            .unwrap_or(0u64);
255        let get = self
256            .read_slot(block, map_slot(qk.as_slice(), 1))?
257            .try_into()
258            .unwrap_or(0u64);
259        Ok(put.saturating_sub(get))
260    }
261
262    /// Enumerate `(ticket_id, timeout)` for every pending retryable
263    /// in the timeout queue at the given block, up to `max_entries`.
264    /// Reads are against the historic state for `block` via the
265    /// StateProvider — no ArbosState instantiation required.
266    fn queue_snapshot_at(
267        &self,
268        block: u64,
269        max_entries: usize,
270    ) -> Result<Vec<(B256, u64)>, ErrorObject<'static>> {
271        let qk = Self::retryable_queue_storage_key();
272        let put: u64 = self
273            .read_slot(block, map_slot(qk.as_slice(), 0))?
274            .try_into()
275            .unwrap_or(0);
276        let get: u64 = self
277            .read_slot(block, map_slot(qk.as_slice(), 1))?
278            .try_into()
279            .unwrap_or(0);
280        let retryables_key = derive_subspace_key(ROOT_STORAGE_KEY, RETRYABLES_SUBSPACE);
281        let mut out = Vec::new();
282        for idx in get..put {
283            if out.len() >= max_entries {
284                break;
285            }
286            let ticket_slot = map_slot(qk.as_slice(), idx);
287            let ticket_word = self.read_slot(block, ticket_slot)?;
288            let id = B256::from(ticket_word.to_be_bytes::<32>());
289            if id == B256::ZERO {
290                continue;
291            }
292            // Per-retryable storage is keyed by ticket_id as a subspace
293            // of the retryables subspace. Read the timeout field.
294            let ret_key = derive_subspace_key(retryables_key.as_slice(), id.as_slice());
295            let timeout: u64 = self
296                .read_slot(block, map_slot(ret_key.as_slice(), TIMEOUT_OFFSET))?
297                .try_into()
298                .unwrap_or(0);
299            if timeout == 0 {
300                continue;
301            }
302            out.push((id, timeout));
303        }
304        Ok(out)
305    }
306}
307
#[async_trait::async_trait]
impl<Provider> ArbDebugApiServer for ArbDebugHandler<Provider>
where
    Provider:
        StateProviderFactory + BlockReaderIdExt + ReceiptProvider + Clone + Send + Sync + 'static,
{
    /// `arbdebug_pricingModel`: sample pricing state at up to
    /// `block_range_bound` evenly spaced blocks in `[start, end]`,
    /// plus scalar parameters read once at `end`.
    async fn pricing_model(&self, start: u64, end: u64) -> RpcResult<PricingModelHistory> {
        self.check_enabled()?;
        self.validate_range(start, end)?;
        let (first, step, samples) = compute_step(start, end, self.config.block_range_bound);

        // Parallel per-sample arrays; one entry is pushed to each per
        // loop iteration below.
        let mut timestamp = Vec::with_capacity(samples as usize);
        let mut base_fee = Vec::with_capacity(samples as usize);
        let mut gas_backlog = Vec::with_capacity(samples as usize);
        let mut gas_used = Vec::with_capacity(samples as usize);
        let mut l1_base_fee_estimate = Vec::with_capacity(samples as usize);
        let mut l1_last_surplus = Vec::with_capacity(samples as usize);
        let mut l1_funds_due = Vec::with_capacity(samples as usize);
        let mut l1_funds_due_for_rewards = Vec::with_capacity(samples as usize);
        let mut l1_units_since_update = Vec::with_capacity(samples as usize);
        let mut l1_last_update_time = Vec::with_capacity(samples as usize);

        for i in 0..samples {
            // Sampled block number; the last iteration lands exactly on `end`.
            let b = first + step * i;
            timestamp.push(self.header_timestamp(b)?);
            base_fee.push(self.read_l2_field(b, L2_BASE_FEE_OFFSET)?);
            gas_backlog.push(
                self.read_l2_field(b, L2_GAS_BACKLOG_OFFSET)?
                    .try_into()
                    .unwrap_or(0u64),
            );
            gas_used.push(self.block_gas_used(b)?);
            l1_base_fee_estimate.push(self.read_l1_field(b, L1_PRICE_PER_UNIT_OFFSET)?);
            l1_last_surplus.push(self.read_l1_field(b, L1_LAST_SURPLUS_OFFSET)?);
            // NOTE(review): `l1_funds_due` is populated from the
            // fees-available slot — confirm this matches Nitro's
            // FundsDue semantics rather than a dedicated funds-due field.
            l1_funds_due.push(self.read_l1_field(b, L1_L1_FEES_AVAILABLE_OFFSET)?);
            l1_funds_due_for_rewards.push(self.read_l1_field(b, L1_FUNDS_DUE_FOR_REWARDS_OFFSET)?);
            l1_units_since_update.push(
                self.read_l1_field(b, L1_UNITS_SINCE_UPDATE_OFFSET)?
                    .try_into()
                    .unwrap_or(0u64),
            );
            l1_last_update_time.push(
                self.read_l1_field(b, L1_LAST_UPDATE_TIME_OFFSET)?
                    .try_into()
                    .unwrap_or(0u64),
            );
        }

        // Scalar fields — read once at `end`.
        let min_base_fee = self.read_l2_field(end, L2_MIN_BASE_FEE_OFFSET)?;
        let speed_limit = self
            .read_l2_field(end, L2_SPEED_LIMIT_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let per_block_gas_limit = self
            .read_l2_field(end, L2_PER_BLOCK_GAS_LIMIT_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let pricing_inertia = self
            .read_l2_field(end, L2_PRICING_INERTIA_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let backlog_tolerance = self
            .read_l2_field(end, L2_BACKLOG_TOLERANCE_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let l1_equilibration_units = self.read_l1_field(end, L1_EQUILIBRATION_UNITS_OFFSET)?;
        // Per-batch cost is signed in the retrieved layout; out-of-range
        // words collapse to 0 via `unwrap_or`.
        let l1_per_batch_cost: i64 = self
            .read_l1_field(end, L1_PER_BATCH_GAS_COST_OFFSET)?
            .try_into()
            .unwrap_or(0i64);
        let l1_amortized_cost_cap_bips = self
            .read_l1_field(end, L1_AMORTIZED_COST_CAP_BIPS_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let l1_pricing_inertia = self
            .read_l1_field(end, L1_INERTIA_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let l1_per_unit_reward = self
            .read_l1_field(end, L1_PER_UNIT_REWARD_OFFSET)?
            .try_into()
            .unwrap_or(0u64);
        let l1_pay_reward_to = {
            // Address is the low 20 bytes of the 32-byte storage word.
            let word = self.read_l1_field(end, L1_PAY_REWARDS_TO_OFFSET)?;
            Address::from_slice(&word.to_be_bytes::<32>()[12..])
        };

        Ok(PricingModelHistory {
            start,
            end,
            step,
            timestamp,
            base_fee,
            gas_backlog,
            gas_used,
            min_base_fee,
            speed_limit,
            per_block_gas_limit,
            // NOTE(review): hardcoded to 0 — no per-tx gas limit slot is
            // read in this file; confirm whether the field should be
            // sourced from state or is intentionally unsupported.
            per_tx_gas_limit: 0,
            pricing_inertia,
            backlog_tolerance,
            l1_base_fee_estimate,
            l1_last_surplus,
            l1_funds_due,
            l1_funds_due_for_rewards,
            l1_units_since_update,
            l1_last_update_time,
            l1_equilibration_units,
            l1_per_batch_cost,
            l1_amortized_cost_cap_bips,
            l1_pricing_inertia,
            l1_per_unit_reward,
            l1_pay_reward_to,
        })
    }

    /// `arbdebug_timeoutQueueHistory`: sample the retryable timeout
    /// queue's size at up to `block_range_bound` blocks in `[start, end]`.
    async fn timeout_queue_history(&self, start: u64, end: u64) -> RpcResult<TimeoutQueueHistory> {
        self.check_enabled()?;
        self.validate_range(start, end)?;
        let (first, step, samples) = compute_step(start, end, self.config.block_range_bound);

        let mut timestamp = Vec::with_capacity(samples as usize);
        let mut size = Vec::with_capacity(samples as usize);
        for i in 0..samples {
            let b = first + step * i;
            timestamp.push(self.header_timestamp(b)?);
            size.push(self.queue_size_at(b)?);
        }
        Ok(TimeoutQueueHistory {
            start,
            end,
            step,
            timestamp,
            size,
        })
    }

    /// `arbdebug_timeoutQueue`: enumerate pending retryables at one
    /// block, capped at `timeout_queue_bound` tickets.
    async fn timeout_queue(&self, block_num: u64) -> RpcResult<TimeoutQueue> {
        self.check_enabled()?;
        let entries =
            self.queue_snapshot_at(block_num, self.config.timeout_queue_bound as usize)?;
        // Split the (id, timeout) pairs into the parallel response arrays.
        let (tickets, timeouts): (Vec<B256>, Vec<u64>) = entries.into_iter().unzip();
        Ok(TimeoutQueue {
            block_number: block_num,
            tickets,
            timeouts,
        })
    }
}
458
#[cfg(test)]
mod tests {
    use super::*;

    /// A span already within the bound samples every block with stride 1.
    #[test]
    fn compute_step_span_fits_bound() {
        assert_eq!(compute_step(100, 109, 256), (100, 1, 10));
    }

    /// An oversized span is thinned: sample count respects the bound,
    /// the stride grows, and the final sample stays anchored at `end`.
    #[test]
    fn compute_step_span_exceeds_bound() {
        let (first, step, samples) = compute_step(0, 9999, 100);
        assert!(samples <= 100);
        assert!(step >= 100);
        assert_eq!(first + step * (samples - 1), 9999);
    }

    /// Degenerate one-block range yields exactly one sample.
    #[test]
    fn compute_step_single_block() {
        assert_eq!(compute_step(42, 42, 256), (42, 1, 1));
    }

    /// A zero bound disables sampling entirely.
    #[test]
    fn compute_step_zero_bound() {
        let (_, step, samples) = compute_step(0, 10, 0);
        assert_eq!((step, samples), (1, 0));
    }
}