iota_node/lib.rs

// Copyright (c) Mysten Labs, Inc.
// Modifications Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

#[cfg(msim)]
use std::sync::atomic::Ordering;
use std::{
    collections::{BTreeSet, HashMap, HashSet},
    fmt,
    future::Future,
    path::PathBuf,
    str::FromStr,
    sync::{Arc, Weak},
    time::Duration,
};

use anemo::Network;
use anemo_tower::{
    callback::CallbackLayer,
    trace::{DefaultMakeSpan, DefaultOnFailure, TraceLayer},
};
use anyhow::{Result, anyhow};
use arc_swap::ArcSwap;
use fastcrypto_zkp::bn254::zk_login::{JWK, JwkId, OIDCProvider};
use futures::future::BoxFuture;
pub use handle::IotaNodeHandle;
use iota_archival::{reader::ArchiveReaderBalancer, writer::ArchiveWriter};
use iota_common::debug_fatal;
use iota_config::{
    ConsensusConfig, NodeConfig,
    node::{DBCheckpointConfig, RunWithRange},
    node_config_metrics::NodeConfigMetrics,
    object_storage_config::{ObjectStoreConfig, ObjectStoreType},
};
use iota_core::{
    authority::{
        AuthorityState, AuthorityStore, RandomnessRoundReceiver,
        authority_per_epoch_store::AuthorityPerEpochStore,
        authority_store_pruner::ObjectsCompactionFilter,
        authority_store_tables::{
            AuthorityPerpetualTables, AuthorityPerpetualTablesOptions, AuthorityPrunerTables,
        },
        backpressure::BackpressureManager,
        epoch_start_configuration::{EpochFlag, EpochStartConfigTrait, EpochStartConfiguration},
    },
    authority_aggregator::{
        AggregatorSendCapabilityNotificationError, AuthAggMetrics, AuthorityAggregator,
    },
    authority_client::NetworkAuthorityClient,
    authority_server::{ValidatorService, ValidatorServiceMetrics},
    checkpoints::{
        CheckpointMetrics, CheckpointService, CheckpointStore, SendCheckpointToStateSync,
        SubmitCheckpointToConsensus,
        checkpoint_executor::{CheckpointExecutor, StopReason, metrics::CheckpointExecutorMetrics},
    },
    connection_monitor::ConnectionMonitor,
    consensus_adapter::{
        CheckConnection, ConnectionMonitorStatus, ConsensusAdapter, ConsensusAdapterMetrics,
        ConsensusClient,
    },
    consensus_handler::ConsensusHandlerInitializer,
    consensus_manager::{ConsensusManager, ConsensusManagerTrait, UpdatableConsensusClient},
    consensus_validator::{IotaTxValidator, IotaTxValidatorMetrics},
    db_checkpoint_handler::DBCheckpointHandler,
    epoch::{
        committee_store::CommitteeStore, consensus_store_pruner::ConsensusStorePruner,
        epoch_metrics::EpochMetrics, randomness::RandomnessManager,
        reconfiguration::ReconfigurationInitiator,
    },
    execution_cache::build_execution_cache,
    jsonrpc_index::IndexStore,
    module_cache_metrics::ResolverMetrics,
    overload_monitor::overload_monitor,
    rest_index::RestIndexStore,
    safe_client::SafeClientMetricsBase,
    signature_verifier::SignatureVerifierMetrics,
    state_accumulator::{StateAccumulator, StateAccumulatorMetrics},
    storage::{RestReadStore, RocksDbStore},
    traffic_controller::metrics::TrafficControllerMetrics,
    transaction_orchestrator::TransactionOrchestrator,
    validator_tx_finalizer::ValidatorTxFinalizer,
};
use iota_grpc_server::{GrpcReader, GrpcServerHandle, start_grpc_server};
use iota_json_rpc::{
    JsonRpcServerBuilder, coin_api::CoinReadApi, governance_api::GovernanceReadApi,
    indexer_api::IndexerApi, move_utils::MoveUtils, read_api::ReadApi,
    transaction_builder_api::TransactionBuilderApi,
    transaction_execution_api::TransactionExecutionApi,
};
use iota_json_rpc_api::JsonRpcMetrics;
use iota_macros::{fail_point, fail_point_async, replay_log};
use iota_metrics::{
    RegistryID, RegistryService,
    hardware_metrics::register_hardware_metrics,
    metrics_network::{MetricsMakeCallbackHandler, NetworkConnectionMetrics, NetworkMetrics},
    server_timing_middleware, spawn_monitored_task,
};
use iota_names::config::IotaNamesConfig;
use iota_network::{
    api::ValidatorServer, discovery, discovery::TrustedPeerChangeEvent, randomness, state_sync,
};
use iota_network_stack::server::{IOTA_TLS_SERVER_NAME, ServerBuilder};
use iota_protocol_config::ProtocolConfig;
use iota_rest_api::RestMetrics;
use iota_sdk_types::crypto::{Intent, IntentMessage, IntentScope};
use iota_snapshot::uploader::StateSnapshotUploader;
use iota_storage::{
    FileCompression, StorageFormat,
    http_key_value_store::HttpKVStore,
    key_value_store::{FallbackTransactionKVStore, TransactionKeyValueStore},
    key_value_store_metrics::KeyValueStoreMetrics,
};
use iota_types::{
    base_types::{AuthorityName, ConciseableName, EpochId},
    committee::Committee,
    crypto::{AuthoritySignature, IotaAuthoritySignature, KeypairTraits, RandomnessRound},
    digests::ChainIdentifier,
    error::{IotaError, IotaResult},
    executable_transaction::VerifiedExecutableTransaction,
    execution_config_utils::to_binary_config,
    full_checkpoint_content::CheckpointData,
    iota_system_state::{
        IotaSystemState, IotaSystemStateTrait,
        epoch_start_iota_system_state::{EpochStartSystemState, EpochStartSystemStateTrait},
    },
    messages_consensus::{
        AuthorityCapabilitiesV1, ConsensusTransaction, ConsensusTransactionKind,
        SignedAuthorityCapabilitiesV1, check_total_jwk_size,
    },
    messages_grpc::HandleCapabilityNotificationRequestV1,
    quorum_driver_types::QuorumDriverEffectsQueueResult,
    supported_protocol_versions::SupportedProtocolVersions,
    transaction::{Transaction, VerifiedCertificate},
};
use prometheus::Registry;
#[cfg(msim)]
pub use simulator::set_jwk_injector;
#[cfg(msim)]
use simulator::*;
use tap::tap::TapFallible;
use tokio::{
    runtime::Handle,
    sync::{Mutex, broadcast, mpsc, watch},
    task::{JoinHandle, JoinSet},
};
use tokio_util::sync::CancellationToken;
use tower::ServiceBuilder;
use tracing::{Instrument, debug, error, error_span, info, trace_span, warn};
use typed_store::{
    DBMetrics,
    rocks::{check_and_mark_db_corruption, default_db_options, unmark_db_corruption},
};

use crate::metrics::{GrpcMetrics, IotaNodeMetrics};

pub mod admin;
mod handle;
pub mod metrics;

pub struct ValidatorComponents {
    validator_server_handle: SpawnOnce,
    validator_overload_monitor_handle: Option<JoinHandle<()>>,
    consensus_manager: ConsensusManager,
    consensus_store_pruner: ConsensusStorePruner,
    consensus_adapter: Arc<ConsensusAdapter>,
    // Keeping the handle to the checkpoint service tasks to shut them down
    // during reconfiguration.
    checkpoint_service_tasks: JoinSet<()>,
    checkpoint_metrics: Arc<CheckpointMetrics>,
    iota_tx_validator_metrics: Arc<IotaTxValidatorMetrics>,
    validator_registry_id: RegistryID,
}

#[cfg(msim)]
mod simulator {
    use std::sync::atomic::AtomicBool;

    use super::*;

    pub(super) struct SimState {
        pub sim_node: iota_simulator::runtime::NodeHandle,
        pub sim_safe_mode_expected: AtomicBool,
        _leak_detector: iota_simulator::NodeLeakDetector,
    }

    impl Default for SimState {
        fn default() -> Self {
            Self {
                sim_node: iota_simulator::runtime::NodeHandle::current(),
                sim_safe_mode_expected: AtomicBool::new(false),
                _leak_detector: iota_simulator::NodeLeakDetector::new(),
            }
        }
    }

    type JwkInjector = dyn Fn(AuthorityName, &OIDCProvider) -> IotaResult<Vec<(JwkId, JWK)>>
        + Send
        + Sync
        + 'static;

    fn default_fetch_jwks(
        _authority: AuthorityName,
        _provider: &OIDCProvider,
    ) -> IotaResult<Vec<(JwkId, JWK)>> {
        use fastcrypto_zkp::bn254::zk_login::parse_jwks;
        // Just load a default Twitch jwk for testing.
        parse_jwks(
            iota_types::zk_login_util::DEFAULT_JWK_BYTES,
            &OIDCProvider::Twitch,
        )
        .map_err(|_| IotaError::JWKRetrieval)
    }

    thread_local! {
        static JWK_INJECTOR: std::cell::RefCell<Arc<JwkInjector>> =
            std::cell::RefCell::new(Arc::new(default_fetch_jwks));
    }

    pub(super) fn get_jwk_injector() -> Arc<JwkInjector> {
        JWK_INJECTOR.with(|injector| injector.borrow().clone())
    }

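    /// Replaces the thread-local JWK fetching function used by simulation
    /// tests.
    ///
    /// A minimal usage sketch; the closure body is an illustrative
    /// placeholder, not the production fetcher:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// set_jwk_injector(Arc::new(|_authority, _provider| {
    ///     // Return a fixed (JwkId, JWK) list for the provider under test.
    ///     Ok(vec![])
    /// }));
    /// ```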
    pub fn set_jwk_injector(injector: Arc<JwkInjector>) {
        JWK_INJECTOR.with(|cell| *cell.borrow_mut() = injector);
    }
}

pub struct IotaNode {
    config: NodeConfig,
    validator_components: Mutex<Option<ValidatorComponents>>,
    /// The HTTP server responsible for serving JSON-RPC as well as the
    /// experimental REST service.
    _http_server: Option<iota_http::ServerHandle>,
    state: Arc<AuthorityState>,
    transaction_orchestrator: Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>>,
    registry_service: RegistryService,
    metrics: Arc<IotaNodeMetrics>,

    _discovery: discovery::Handle,
    state_sync_handle: state_sync::Handle,
    randomness_handle: randomness::Handle,
    checkpoint_store: Arc<CheckpointStore>,
    accumulator: Mutex<Option<Arc<StateAccumulator>>>,
    connection_monitor_status: Arc<ConnectionMonitorStatus>,

    /// Broadcast channel to send the starting system state for the next epoch.
    end_of_epoch_channel: broadcast::Sender<IotaSystemState>,

    /// Broadcast channel to notify [`DiscoveryEventLoop`] for new validator
    /// peers.
    trusted_peer_change_tx: watch::Sender<TrustedPeerChangeEvent>,

    backpressure_manager: Arc<BackpressureManager>,

    _db_checkpoint_handle: Option<tokio::sync::broadcast::Sender<()>>,

    #[cfg(msim)]
    sim_state: SimState,

    _state_archive_handle: Option<broadcast::Sender<()>>,

    _state_snapshot_uploader_handle: Option<broadcast::Sender<()>>,
    // Channel to allow signaling upstream to shut down iota-node.
    shutdown_channel_tx: broadcast::Sender<Option<RunWithRange>>,

    /// Handle to the gRPC server for gRPC streaming and graceful shutdown
    grpc_server_handle: Mutex<Option<GrpcServerHandle>>,

    /// AuthorityAggregator of the network, created at start and at the
    /// beginning of each epoch. Uses ArcSwap so that we can mutate it without
    /// taking a mutable reference.
    // TODO: Eventually we can make this auth aggregator a shared reference so
    // that this update will automatically propagate to other uses.
    auth_agg: Arc<ArcSwap<AuthorityAggregator<NetworkAuthorityClient>>>,
}

impl fmt::Debug for IotaNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("IotaNode")
            .field("name", &self.state.name.concise())
            .finish()
    }
}

static MAX_JWK_KEYS_PER_FETCH: usize = 100;

impl IotaNode {
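    /// Starts an [`IotaNode`] with the software version reported as
    /// "unknown"; see [`Self::start_async`] for the full entry point.
    ///
    /// A minimal usage sketch, assuming `config` and `registry_service` are
    /// built by the caller:
    ///
    /// ```ignore
    /// let node = IotaNode::start(config, registry_service, None).await?;
    /// ```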
    pub async fn start(
        config: NodeConfig,
        registry_service: RegistryService,
        custom_rpc_runtime: Option<Handle>,
    ) -> Result<Arc<IotaNode>> {
        Self::start_async(config, registry_service, custom_rpc_runtime, "unknown").await
    }

    /// Starts the JWK (JSON Web Key) updater tasks for the specified node
    /// configuration.
    /// This function ensures continuous fetching, validation, and submission
    /// of JWKs, maintaining up-to-date keys for the specified providers.
    fn start_jwk_updater(
        config: &NodeConfig,
        metrics: Arc<IotaNodeMetrics>,
        authority: AuthorityName,
        epoch_store: Arc<AuthorityPerEpochStore>,
        consensus_adapter: Arc<ConsensusAdapter>,
    ) {
        let epoch = epoch_store.epoch();

        let supported_providers = config
            .zklogin_oauth_providers
            .get(&epoch_store.get_chain_identifier().chain())
            .unwrap_or(&BTreeSet::new())
            .iter()
            .map(|s| OIDCProvider::from_str(s).expect("Invalid provider string"))
            .collect::<Vec<_>>();

        let fetch_interval = Duration::from_secs(config.jwk_fetch_interval_seconds);

        info!(
            ?fetch_interval,
            "Starting JWK updater tasks with supported providers: {:?}", supported_providers
        );

        fn validate_jwk(
            metrics: &Arc<IotaNodeMetrics>,
            provider: &OIDCProvider,
            id: &JwkId,
            jwk: &JWK,
        ) -> bool {
            let Ok(iss_provider) = OIDCProvider::from_iss(&id.iss) else {
                warn!(
                    "JWK iss {:?} (retrieved from {:?}) is not a valid provider",
                    id.iss, provider
                );
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            };

            if iss_provider != *provider {
                warn!(
                    "JWK iss {:?} (retrieved from {:?}) does not match provider {:?}",
                    id.iss, provider, iss_provider
                );
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            }

            if !check_total_jwk_size(id, jwk) {
                warn!("JWK {:?} (retrieved from {:?}) is too large", id, provider);
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            }

            true
        }

        // metrics is:
        //  pub struct IotaNodeMetrics {
        //      pub jwk_requests: IntCounterVec,
        //      pub jwk_request_errors: IntCounterVec,
        //      pub total_jwks: IntCounterVec,
        //      pub unique_jwks: IntCounterVec,
        //  }

        for p in supported_providers.into_iter() {
            let provider_str = p.to_string();
            let epoch_store = epoch_store.clone();
            let consensus_adapter = consensus_adapter.clone();
            let metrics = metrics.clone();
            spawn_monitored_task!(epoch_store.clone().within_alive_epoch(
                async move {
                    // Note: restart-safe de-duplication happens after
                    // consensus; this is just best-effort to reduce unneeded
                    // submissions.
                    let mut seen = HashSet::new();
                    loop {
                        info!("fetching JWK for provider {:?}", p);
                        metrics.jwk_requests.with_label_values(&[&provider_str]).inc();
                        match Self::fetch_jwks(authority, &p).await {
                            Err(e) => {
                                metrics.jwk_request_errors.with_label_values(&[&provider_str]).inc();
                                warn!("Error when fetching JWK for provider {:?} {:?}", p, e);
                                // Retry in 30 seconds
                                tokio::time::sleep(Duration::from_secs(30)).await;
                                continue;
                            }
                            Ok(mut keys) => {
                                metrics.total_jwks
                                    .with_label_values(&[&provider_str])
                                    .inc_by(keys.len() as u64);

                                keys.retain(|(id, jwk)| {
                                    validate_jwk(&metrics, &p, id, jwk) &&
                                    !epoch_store.jwk_active_in_current_epoch(id, jwk) &&
                                    seen.insert((id.clone(), jwk.clone()))
                                });

                                metrics.unique_jwks
                                    .with_label_values(&[&provider_str])
                                    .inc_by(keys.len() as u64);

                                // prevent oauth providers from sending too many keys,
                                // inadvertently or otherwise
                                if keys.len() > MAX_JWK_KEYS_PER_FETCH {
                                    warn!("Provider {:?} sent too many JWKs, only the first {} will be used", p, MAX_JWK_KEYS_PER_FETCH);
                                    keys.truncate(MAX_JWK_KEYS_PER_FETCH);
                                }

                                for (id, jwk) in keys.into_iter() {
                                    info!("Submitting JWK to consensus: {:?}", id);

                                    let txn = ConsensusTransaction::new_jwk_fetched(authority, id, jwk);
                                    consensus_adapter.submit(txn, None, &epoch_store)
                                        .tap_err(|e| warn!("Error when submitting JWKs to consensus {:?}", e))
                                        .ok();
                                }
                            }
                        }
                        tokio::time::sleep(fetch_interval).await;
                    }
                }
                .instrument(error_span!("jwk_updater_task", epoch)),
            ));
        }
    }

    pub async fn start_async(
        config: NodeConfig,
        registry_service: RegistryService,
        custom_rpc_runtime: Option<Handle>,
        software_version: &'static str,
    ) -> Result<Arc<IotaNode>> {
        NodeConfigMetrics::new(&registry_service.default_registry()).record_metrics(&config);
        let mut config = config.clone();
        if config.supported_protocol_versions.is_none() {
            info!(
                "populating config.supported_protocol_versions with default {:?}",
                SupportedProtocolVersions::SYSTEM_DEFAULT
            );
            config.supported_protocol_versions = Some(SupportedProtocolVersions::SYSTEM_DEFAULT);
        }

        let run_with_range = config.run_with_range;
        let is_validator = config.consensus_config().is_some();
        let is_full_node = !is_validator;
        let prometheus_registry = registry_service.default_registry();

        info!(node =? config.authority_public_key(),
            "Initializing iota-node listening on {}", config.network_address
        );

        let genesis = config.genesis()?.clone();

        let chain_identifier = ChainIdentifier::from(*genesis.checkpoint().digest());
        info!("IOTA chain identifier: {chain_identifier}");

        // Check and set the db_corrupted flag
        let db_corrupted_path = &config.db_path().join("status");
        if let Err(err) = check_and_mark_db_corruption(db_corrupted_path) {
            panic!("Failed to check database corruption: {err}");
        }

        // Initialize metrics to track db usage before creating any stores
        DBMetrics::init(&prometheus_registry);

        // Initialize IOTA metrics.
        iota_metrics::init_metrics(&prometheus_registry);
        // Unsupported (because of the use of a static variable) and
        // unnecessary in simtests.
        #[cfg(not(msim))]
        iota_metrics::thread_stall_monitor::start_thread_stall_monitor();

        // Register hardware metrics.
        register_hardware_metrics(&registry_service, &config.db_path)
            .expect("Failed registering hardware metrics");
        // Register uptime metric
        prometheus_registry
            .register(iota_metrics::uptime_metric(
                if is_validator {
                    "validator"
                } else {
                    "fullnode"
                },
                software_version,
                &chain_identifier.to_string(),
            ))
            .expect("Failed registering uptime metric");

        // If genesis comes with migration data, load it into memory from the
        // file path specified in the config.
        let migration_tx_data = if genesis.contains_migrations() {
            // The load already verifies that the content of the migration blob
            // is valid with respect to the content found in genesis.
            Some(config.load_migration_tx_data()?)
        } else {
            None
        };

        let secret = Arc::pin(config.authority_key_pair().copy());
        let genesis_committee = genesis.committee()?;
        let committee_store = Arc::new(CommitteeStore::new(
            config.db_path().join("epochs"),
            &genesis_committee,
            None,
        ));

        let mut pruner_db = None;
        if config
            .authority_store_pruning_config
            .enable_compaction_filter
        {
            pruner_db = Some(Arc::new(AuthorityPrunerTables::open(
                &config.db_path().join("store"),
            )));
        }
        let compaction_filter = pruner_db
            .clone()
            .map(|db| ObjectsCompactionFilter::new(db, &prometheus_registry));

        // By default, only enable write stall on validators for perpetual db.
        let enable_write_stall = config.enable_db_write_stall.unwrap_or(is_validator);
        let perpetual_tables_options = AuthorityPerpetualTablesOptions {
            enable_write_stall,
            compaction_filter,
        };
        let perpetual_tables = Arc::new(AuthorityPerpetualTables::open(
            &config.db_path().join("store"),
            Some(perpetual_tables_options),
        ));
        let is_genesis = perpetual_tables
            .database_is_empty()
            .expect("Database read should not fail at init.");
        let checkpoint_store = CheckpointStore::new(&config.db_path().join("checkpoints"));
        let backpressure_manager =
            BackpressureManager::new_from_checkpoint_store(&checkpoint_store);

        let store = AuthorityStore::open(
            perpetual_tables,
            &genesis,
            &config,
            &prometheus_registry,
            migration_tx_data.as_ref(),
        )
        .await?;

        let cur_epoch = store.get_recovery_epoch_at_restart()?;
        let committee = committee_store
            .get_committee(&cur_epoch)?
            .expect("Committee of the current epoch must exist");
        let epoch_start_configuration = store
            .get_epoch_start_configuration()?
            .expect("EpochStartConfiguration of the current epoch must exist");
        let cache_metrics = Arc::new(ResolverMetrics::new(&prometheus_registry));
        let signature_verifier_metrics = SignatureVerifierMetrics::new(&prometheus_registry);

        let cache_traits = build_execution_cache(
            &config.execution_cache_config,
            &epoch_start_configuration,
            &prometheus_registry,
            &store,
            backpressure_manager.clone(),
        );

        let auth_agg = {
            let safe_client_metrics_base = SafeClientMetricsBase::new(&prometheus_registry);
            let auth_agg_metrics = Arc::new(AuthAggMetrics::new(&prometheus_registry));
            Arc::new(ArcSwap::new(Arc::new(
                AuthorityAggregator::new_from_epoch_start_state(
                    epoch_start_configuration.epoch_start_state(),
                    &committee_store,
                    safe_client_metrics_base,
                    auth_agg_metrics,
                ),
            )))
        };

        let chain_id = ChainIdentifier::from(*genesis.checkpoint().digest());
        let chain = match config.chain_override_for_testing {
            Some(chain) => chain,
            None => ChainIdentifier::from(*genesis.checkpoint().digest()).chain(),
        };

        let epoch_options = default_db_options().optimize_db_for_write_throughput(4);
        let epoch_store = AuthorityPerEpochStore::new(
            config.authority_public_key(),
            committee.clone(),
            &config.db_path().join("store"),
            Some(epoch_options.options),
            EpochMetrics::new(&registry_service.default_registry()),
            epoch_start_configuration,
            cache_traits.backing_package_store.clone(),
            cache_traits.object_store.clone(),
            cache_metrics,
            signature_verifier_metrics,
            &config.expensive_safety_check_config,
            (chain_id, chain),
            checkpoint_store
                .get_highest_executed_checkpoint_seq_number()
                .expect("checkpoint store read cannot fail")
                .unwrap_or(0),
        )?;

        info!("created epoch store");

        replay_log!(
            "Beginning replay run. Epoch: {:?}, Protocol config: {:?}",
            epoch_store.epoch(),
            epoch_store.protocol_config()
        );

        // the database is empty at genesis time
        if is_genesis {
            info!("checking IOTA conservation at genesis");
            // When we are opening the db table, the only time it's safe to
            // check IOTA conservation is at genesis. Otherwise we may be in
            // the middle of an epoch and the IOTA conservation check will
            // fail. This also initializes the expected_network_iota_amount
            // table.
            cache_traits
                .reconfig_api
                .try_expensive_check_iota_conservation(&epoch_store, None)
                .expect("IOTA conservation check cannot fail at genesis");
        }

        let effective_buffer_stake = epoch_store.get_effective_buffer_stake_bps();
        let default_buffer_stake = epoch_store
            .protocol_config()
            .buffer_stake_for_protocol_upgrade_bps();
        if effective_buffer_stake != default_buffer_stake {
            warn!(
                ?effective_buffer_stake,
                ?default_buffer_stake,
                "buffer_stake_for_protocol_upgrade_bps is currently overridden"
            );
        }

        checkpoint_store.insert_genesis_checkpoint(
            genesis.checkpoint(),
            genesis.checkpoint_contents().clone(),
            &epoch_store,
        );

        // Database has everything from genesis, set corrupted key to 0
        unmark_db_corruption(db_corrupted_path)?;

        info!("creating state sync store");
        let state_sync_store = RocksDbStore::new(
            cache_traits.clone(),
            committee_store.clone(),
            checkpoint_store.clone(),
        );

        let index_store = if is_full_node && config.enable_index_processing {
            info!("creating index store");
            Some(Arc::new(IndexStore::new(
                config.db_path().join("indexes"),
                &prometheus_registry,
                epoch_store
                    .protocol_config()
                    .max_move_identifier_len_as_option(),
            )))
        } else {
            None
        };

        let rest_index = if is_full_node && config.enable_rest_api && config.enable_index_processing
        {
            Some(Arc::new(
                RestIndexStore::new(
                    config.db_path().join("rest_index"),
                    &store,
                    &checkpoint_store,
                    &epoch_store,
                    &cache_traits.backing_package_store,
                )
                .await,
            ))
        } else {
            None
        };

        info!("creating archive reader");
        // Create network
        // TODO only configure validators as seed/preferred peers for validators and not
        // for fullnodes once we've had a chance to re-work fullnode
        // configuration generation.
        let archive_readers =
            ArchiveReaderBalancer::new(config.archive_reader_config(), &prometheus_registry)?;
        let (trusted_peer_change_tx, trusted_peer_change_rx) = watch::channel(Default::default());
        let (randomness_tx, randomness_rx) = mpsc::channel(
            config
                .p2p_config
                .randomness
                .clone()
                .unwrap_or_default()
                .mailbox_capacity(),
        );
        let (p2p_network, discovery_handle, state_sync_handle, randomness_handle) =
            Self::create_p2p_network(
                &config,
                state_sync_store.clone(),
                chain_identifier,
                trusted_peer_change_rx,
                archive_readers.clone(),
                randomness_tx,
                &prometheus_registry,
            )?;

        // We must explicitly send this instead of relying on the initial value to
        // trigger watch value change, so that state-sync is able to process it.
        send_trusted_peer_change(
            &config,
            &trusted_peer_change_tx,
            epoch_store.epoch_start_state(),
        );

        info!("start state archival");
        // Start archiving local state to remote store
        let state_archive_handle =
            Self::start_state_archival(&config, &prometheus_registry, state_sync_store.clone())
                .await?;

        info!("start snapshot upload");
        // Start uploading state snapshot to remote store
        let state_snapshot_handle =
            Self::start_state_snapshot(&config, &prometheus_registry, checkpoint_store.clone())?;

        // Start uploading db checkpoints to remote store
        info!("start db checkpoint");
        let (db_checkpoint_config, db_checkpoint_handle) = Self::start_db_checkpoint(
            &config,
            &prometheus_registry,
            state_snapshot_handle.is_some(),
        )?;

        let mut genesis_objects = genesis.objects().to_vec();
        if let Some(migration_tx_data) = migration_tx_data.as_ref() {
            genesis_objects.extend(migration_tx_data.get_objects());
        }

        let authority_name = config.authority_public_key();
        let validator_tx_finalizer =
            config
                .enable_validator_tx_finalizer
                .then_some(Arc::new(ValidatorTxFinalizer::new(
                    auth_agg.clone(),
                    authority_name,
                    &prometheus_registry,
                )));

        info!("create authority state");
        let state = AuthorityState::new(
            authority_name,
            secret,
            config.supported_protocol_versions.unwrap(),
            store.clone(),
            cache_traits.clone(),
            epoch_store.clone(),
            committee_store.clone(),
            index_store.clone(),
            rest_index,
            checkpoint_store.clone(),
            &prometheus_registry,
            &genesis_objects,
            &db_checkpoint_config,
            config.clone(),
            archive_readers,
            validator_tx_finalizer,
            chain_identifier,
            pruner_db,
        )
        .await;

        // ensure genesis and migration txs were executed
        if epoch_store.epoch() == 0 {
            let genesis_tx = &genesis.transaction();
            let span = error_span!("genesis_txn", tx_digest = ?genesis_tx.digest());
            // Execute genesis transaction
            Self::execute_transaction_immediately_at_zero_epoch(
                &state,
                &epoch_store,
                genesis_tx,
                span,
            )
            .await;

            // Execute migration transactions if present
            if let Some(migration_tx_data) = migration_tx_data {
                for (tx_digest, (tx, _, _)) in migration_tx_data.txs_data() {
                    let span = error_span!("migration_txn", tx_digest = ?tx_digest);
                    Self::execute_transaction_immediately_at_zero_epoch(
                        &state,
                        &epoch_store,
                        tx,
                        span,
                    )
                    .await;
                }
            }
        }

        // Start the loop that receives new randomness and generates transactions for
        // it.
        RandomnessRoundReceiver::spawn(state.clone(), randomness_rx);

        if config
            .expensive_safety_check_config
            .enable_secondary_index_checks()
        {
            if let Some(indexes) = state.indexes.clone() {
                iota_core::verify_indexes::verify_indexes(
                    state.get_accumulator_store().as_ref(),
                    indexes,
                )
                .expect("secondary indexes are inconsistent");
            }
        }

        let (end_of_epoch_channel, end_of_epoch_receiver) =
            broadcast::channel(config.end_of_epoch_broadcast_channel_capacity);

        let transaction_orchestrator = if is_full_node && run_with_range.is_none() {
            Some(Arc::new(TransactionOrchestrator::new_with_auth_aggregator(
                auth_agg.load_full(),
                state.clone(),
                end_of_epoch_receiver,
                &config.db_path(),
                &prometheus_registry,
            )))
        } else {
            None
        };

        let http_server = build_http_server(
            state.clone(),
            state_sync_store.clone(),
            &transaction_orchestrator.clone(),
            &config,
            &prometheus_registry,
            custom_rpc_runtime,
            software_version,
        )
        .await?;

        let accumulator = Arc::new(StateAccumulator::new(
            cache_traits.accumulator_store.clone(),
            StateAccumulatorMetrics::new(&prometheus_registry),
        ));

        let authority_names_to_peer_ids = epoch_store
            .epoch_start_state()
            .get_authority_names_to_peer_ids();

        let network_connection_metrics =
            NetworkConnectionMetrics::new("iota", &registry_service.default_registry());

        let authority_names_to_peer_ids = ArcSwap::from_pointee(authority_names_to_peer_ids);

        let (_connection_monitor_handle, connection_statuses) = ConnectionMonitor::spawn(
            p2p_network.downgrade(),
            network_connection_metrics,
            HashMap::new(),
            None,
        );

        let connection_monitor_status = ConnectionMonitorStatus {
            connection_statuses,
            authority_names_to_peer_ids,
        };

        let connection_monitor_status = Arc::new(connection_monitor_status);
        let iota_node_metrics =
            Arc::new(IotaNodeMetrics::new(&registry_service.default_registry()));

        // Convert the transaction orchestrator to an executor trait object for
        // the gRPC server. Note that the transaction_orchestrator (and thus
        // the executor) will be None if this is a validator node or
        // run_with_range is set.
        let executor: Option<Arc<dyn iota_types::transaction_executor::TransactionExecutor>> =
            transaction_orchestrator
                .clone()
                .map(|o| o as Arc<dyn iota_types::transaction_executor::TransactionExecutor>);

        let grpc_server_handle =
            build_grpc_server(&config, state.clone(), state_sync_store.clone(), executor).await?;

        let validator_components = if state.is_committee_validator(&epoch_store) {
            let (components, _) = futures::join!(
                Self::construct_validator_components(
                    config.clone(),
                    state.clone(),
                    committee,
                    epoch_store.clone(),
                    checkpoint_store.clone(),
                    state_sync_handle.clone(),
                    randomness_handle.clone(),
                    Arc::downgrade(&accumulator),
                    backpressure_manager.clone(),
                    connection_monitor_status.clone(),
                    &registry_service,
                    iota_node_metrics.clone(),
                ),
                Self::reexecute_pending_consensus_certs(&epoch_store, &state)
            );
            let mut components = components?;

            components.consensus_adapter.submit_recovered(&epoch_store);

            // Start the gRPC server
            components.validator_server_handle = components.validator_server_handle.start().await;

            Some(components)
        } else {
            None
        };

        // setup shutdown channel
        let (shutdown_channel, _) = broadcast::channel::<Option<RunWithRange>>(1);

        let node = Self {
            config,
            validator_components: Mutex::new(validator_components),
            _http_server: http_server,
            state,
            transaction_orchestrator,
            registry_service,
            metrics: iota_node_metrics,

            _discovery: discovery_handle,
            state_sync_handle,
            randomness_handle,
            checkpoint_store,
            accumulator: Mutex::new(Some(accumulator)),
            end_of_epoch_channel,
            connection_monitor_status,
            trusted_peer_change_tx,
            backpressure_manager,

            _db_checkpoint_handle: db_checkpoint_handle,

            #[cfg(msim)]
            sim_state: Default::default(),

            _state_archive_handle: state_archive_handle,
            _state_snapshot_uploader_handle: state_snapshot_handle,
            shutdown_channel_tx: shutdown_channel,

            grpc_server_handle: Mutex::new(grpc_server_handle),

            auth_agg,
        };

        info!("IotaNode started!");
        let node = Arc::new(node);
        let node_copy = node.clone();
        spawn_monitored_task!(async move {
            let result = Self::monitor_reconfiguration(node_copy, epoch_store).await;
            if let Err(error) = result {
                warn!("Reconfiguration finished with error {:?}", error);
            }
        });

        Ok(node)
    }

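    /// Returns a receiver for the broadcast channel that publishes the new
    /// [`IotaSystemState`] at the end of each epoch.
    ///
    /// A minimal usage sketch, assuming a started node handle:
    ///
    /// ```ignore
    /// let mut epoch_rx = node.subscribe_to_epoch_change();
    /// while let Ok(system_state) = epoch_rx.recv().await {
    ///     info!("entering epoch {}", system_state.epoch());
    /// }
    /// ```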
    pub fn subscribe_to_epoch_change(&self) -> broadcast::Receiver<IotaSystemState> {
        self.end_of_epoch_channel.subscribe()
    }

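    /// Returns a receiver for the broadcast channel used to signal that the
    /// node should shut down, carrying the [`RunWithRange`] that triggered
    /// the shutdown, if any.
    ///
    /// A minimal usage sketch, assuming a started node handle:
    ///
    /// ```ignore
    /// let mut shutdown_rx = node.subscribe_to_shutdown_channel();
    /// if let Ok(run_with_range) = shutdown_rx.recv().await {
    ///     info!("node is shutting down (run_with_range = {run_with_range:?})");
    /// }
    /// ```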
    pub fn subscribe_to_shutdown_channel(&self) -> broadcast::Receiver<Option<RunWithRange>> {
        self.shutdown_channel_tx.subscribe()
    }

    pub fn current_epoch_for_testing(&self) -> EpochId {
        self.state.current_epoch_for_testing()
    }

    pub fn db_checkpoint_path(&self) -> PathBuf {
        self.config.db_checkpoint_path()
    }

    // Init reconfig process by starting to reject user certs
    pub async fn close_epoch(&self, epoch_store: &Arc<AuthorityPerEpochStore>) -> IotaResult {
        info!("close_epoch (current epoch = {})", epoch_store.epoch());
        self.validator_components
            .lock()
            .await
            .as_ref()
            .ok_or_else(|| IotaError::from("Node is not a validator"))?
            .consensus_adapter
            .close_epoch(epoch_store);
        Ok(())
    }

    pub fn clear_override_protocol_upgrade_buffer_stake(&self, epoch: EpochId) -> IotaResult {
        self.state
            .clear_override_protocol_upgrade_buffer_stake(epoch)
    }

    pub fn set_override_protocol_upgrade_buffer_stake(
        &self,
        epoch: EpochId,
        buffer_stake_bps: u64,
    ) -> IotaResult {
        self.state
            .set_override_protocol_upgrade_buffer_stake(epoch, buffer_stake_bps)
    }

    // Testing-only API to start epoch close process.
    // For production code, please use the non-testing version.
    pub async fn close_epoch_for_testing(&self) -> IotaResult {
        let epoch_store = self.state.epoch_store_for_testing();
        self.close_epoch(&epoch_store).await
    }

    async fn start_state_archival(
        config: &NodeConfig,
        prometheus_registry: &Registry,
        state_sync_store: RocksDbStore,
    ) -> Result<Option<tokio::sync::broadcast::Sender<()>>> {
        if let Some(remote_store_config) = &config.state_archive_write_config.object_store_config {
            let local_store_config = ObjectStoreConfig {
                object_store: Some(ObjectStoreType::File),
                directory: Some(config.archive_path()),
                ..Default::default()
            };
            let archive_writer = ArchiveWriter::new(
                local_store_config,
                remote_store_config.clone(),
                FileCompression::Zstd,
                StorageFormat::Blob,
                Duration::from_secs(600),
                256 * 1024 * 1024,
                prometheus_registry,
            )
            .await?;
            Ok(Some(archive_writer.start(state_sync_store).await?))
        } else {
            Ok(None)
        }
    }

    /// Creates a StateSnapshotUploader and starts it if the
    /// StateSnapshotConfig is set.
    fn start_state_snapshot(
        config: &NodeConfig,
        prometheus_registry: &Registry,
        checkpoint_store: Arc<CheckpointStore>,
    ) -> Result<Option<tokio::sync::broadcast::Sender<()>>> {
        if let Some(remote_store_config) = &config.state_snapshot_write_config.object_store_config {
            let snapshot_uploader = StateSnapshotUploader::new(
                &config.db_checkpoint_path(),
                &config.snapshot_path(),
                remote_store_config.clone(),
                60,
                prometheus_registry,
                checkpoint_store,
            )?;
            Ok(Some(snapshot_uploader.start()))
        } else {
            Ok(None)
        }
    }

    fn start_db_checkpoint(
        config: &NodeConfig,
        prometheus_registry: &Registry,
        state_snapshot_enabled: bool,
    ) -> Result<(
        DBCheckpointConfig,
        Option<tokio::sync::broadcast::Sender<()>>,
    )> {
        let checkpoint_path = Some(
            config
                .db_checkpoint_config
                .checkpoint_path
                .clone()
                .unwrap_or_else(|| config.db_checkpoint_path()),
        );
        let db_checkpoint_config = if config.db_checkpoint_config.checkpoint_path.is_none() {
            DBCheckpointConfig {
                checkpoint_path,
                perform_db_checkpoints_at_epoch_end: if state_snapshot_enabled {
                    true
                } else {
                    config
                        .db_checkpoint_config
                        .perform_db_checkpoints_at_epoch_end
                },
                ..config.db_checkpoint_config.clone()
            }
        } else {
            config.db_checkpoint_config.clone()
        };

        match (
            db_checkpoint_config.object_store_config.as_ref(),
            state_snapshot_enabled,
        ) {
            // If db checkpoint config object store not specified but
            // state snapshot object store is specified, create handler
            // anyway for marking db checkpoints as completed so that they
            // can be uploaded as state snapshots.
            (None, false) => Ok((db_checkpoint_config, None)),
            (_, _) => {
                let handler = DBCheckpointHandler::new(
                    &db_checkpoint_config.checkpoint_path.clone().unwrap(),
                    db_checkpoint_config.object_store_config.as_ref(),
                    60,
                    db_checkpoint_config
                        .prune_and_compact_before_upload
                        .unwrap_or(true),
                    config.authority_store_pruning_config.clone(),
                    prometheus_registry,
                    state_snapshot_enabled,
                )?;
                Ok((
                    db_checkpoint_config,
                    Some(DBCheckpointHandler::start(handler)),
                ))
            }
        }
    }

    fn create_p2p_network(
        config: &NodeConfig,
        state_sync_store: RocksDbStore,
        chain_identifier: ChainIdentifier,
        trusted_peer_change_rx: watch::Receiver<TrustedPeerChangeEvent>,
        archive_readers: ArchiveReaderBalancer,
        randomness_tx: mpsc::Sender<(EpochId, RandomnessRound, Vec<u8>)>,
        prometheus_registry: &Registry,
    ) -> Result<(
        Network,
        discovery::Handle,
        state_sync::Handle,
        randomness::Handle,
    )> {
        let (state_sync, state_sync_server) = state_sync::Builder::new()
            .config(config.p2p_config.state_sync.clone().unwrap_or_default())
            .store(state_sync_store)
            .archive_readers(archive_readers)
            .with_metrics(prometheus_registry)
            .build();

        let (discovery, discovery_server) = discovery::Builder::new(trusted_peer_change_rx)
            .config(config.p2p_config.clone())
            .build();

        let (randomness, randomness_router) =
            randomness::Builder::new(config.authority_public_key(), randomness_tx)
                .config(config.p2p_config.randomness.clone().unwrap_or_default())
                .with_metrics(prometheus_registry)
                .build();

        let p2p_network = {
            let routes = anemo::Router::new()
                .add_rpc_service(discovery_server)
                .add_rpc_service(state_sync_server);
            let routes = routes.merge(randomness_router);

            let inbound_network_metrics =
                NetworkMetrics::new("iota", "inbound", prometheus_registry);
            let outbound_network_metrics =
                NetworkMetrics::new("iota", "outbound", prometheus_registry);

            let service = ServiceBuilder::new()
                .layer(
                    TraceLayer::new_for_server_errors()
                        .make_span_with(DefaultMakeSpan::new().level(tracing::Level::INFO))
                        .on_failure(DefaultOnFailure::new().level(tracing::Level::WARN)),
                )
                .layer(CallbackLayer::new(MetricsMakeCallbackHandler::new(
                    Arc::new(inbound_network_metrics),
                    config.p2p_config.excessive_message_size(),
                )))
                .service(routes);

            let outbound_layer = ServiceBuilder::new()
                .layer(
                    TraceLayer::new_for_client_and_server_errors()
                        .make_span_with(DefaultMakeSpan::new().level(tracing::Level::INFO))
                        .on_failure(DefaultOnFailure::new().level(tracing::Level::WARN)),
                )
                .layer(CallbackLayer::new(MetricsMakeCallbackHandler::new(
                    Arc::new(outbound_network_metrics),
                    config.p2p_config.excessive_message_size(),
                )))
                .into_inner();

            let mut anemo_config = config.p2p_config.anemo_config.clone().unwrap_or_default();
            // Set the max_frame_size to be 1 GB to work around the issue of there being too
            // many staking events in the epoch change txn.
            anemo_config.max_frame_size = Some(1 << 30);

            // Set a higher default value for socket send/receive buffers if not already
            // configured.
            let mut quic_config = anemo_config.quic.unwrap_or_default();
            if quic_config.socket_send_buffer_size.is_none() {
                quic_config.socket_send_buffer_size = Some(20 << 20);
            }
            if quic_config.socket_receive_buffer_size.is_none() {
                quic_config.socket_receive_buffer_size = Some(20 << 20);
            }
            quic_config.allow_failed_socket_buffer_size_setting = true;

            // Set high-performance defaults for quinn transport.
            // With 200MiB buffer size and ~500ms RTT, max throughput ~400MiB/s.
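            // (Sketch of the bandwidth-delay arithmetic: throughput is capped
            // at roughly receive_window / RTT = 200 MiB / 0.5 s = 400 MiB/s.)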
            if quic_config.max_concurrent_bidi_streams.is_none() {
                quic_config.max_concurrent_bidi_streams = Some(500);
            }
            if quic_config.max_concurrent_uni_streams.is_none() {
                quic_config.max_concurrent_uni_streams = Some(500);
            }
            if quic_config.stream_receive_window.is_none() {
                quic_config.stream_receive_window = Some(100 << 20);
            }
            if quic_config.receive_window.is_none() {
                quic_config.receive_window = Some(200 << 20);
            }
            if quic_config.send_window.is_none() {
                quic_config.send_window = Some(200 << 20);
            }
            if quic_config.crypto_buffer_size.is_none() {
                quic_config.crypto_buffer_size = Some(1 << 20);
            }
            if quic_config.max_idle_timeout_ms.is_none() {
                quic_config.max_idle_timeout_ms = Some(30_000);
            }
            if quic_config.keep_alive_interval_ms.is_none() {
                quic_config.keep_alive_interval_ms = Some(5_000);
            }
            anemo_config.quic = Some(quic_config);

            let server_name = format!("iota-{chain_identifier}");
            let network = Network::bind(config.p2p_config.listen_address)
                .server_name(&server_name)
                .private_key(config.network_key_pair().copy().private().0.to_bytes())
                .config(anemo_config)
                .outbound_request_layer(outbound_layer)
                .start(service)?;
            info!(
                server_name = server_name,
                "P2p network started on {}",
                network.local_addr()
            );

            network
        };

        let discovery_handle =
            discovery.start(p2p_network.clone(), config.network_key_pair().copy());
        let state_sync_handle = state_sync.start(p2p_network.clone());
        let randomness_handle = randomness.start(p2p_network.clone());

        Ok((
            p2p_network,
            discovery_handle,
            state_sync_handle,
            randomness_handle,
        ))
    }

    /// Asynchronously constructs and initializes the components necessary for
    /// the validator node.
    async fn construct_validator_components(
        config: NodeConfig,
        state: Arc<AuthorityState>,
        committee: Arc<Committee>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state_sync_handle: state_sync::Handle,
        randomness_handle: randomness::Handle,
        accumulator: Weak<StateAccumulator>,
        backpressure_manager: Arc<BackpressureManager>,
        connection_monitor_status: Arc<ConnectionMonitorStatus>,
        registry_service: &RegistryService,
        iota_node_metrics: Arc<IotaNodeMetrics>,
    ) -> Result<ValidatorComponents> {
        let mut config_clone = config.clone();
        let consensus_config = config_clone
            .consensus_config
            .as_mut()
            .ok_or_else(|| anyhow!("Validator is missing consensus config"))?;
        let validator_registry = Registry::new();
        let validator_registry_id = registry_service.add(validator_registry.clone());

        let client = Arc::new(UpdatableConsensusClient::new());
        let consensus_adapter = Arc::new(Self::construct_consensus_adapter(
            &committee,
            consensus_config,
            state.name,
            connection_monitor_status.clone(),
            &validator_registry,
            client.clone(),
            checkpoint_store.clone(),
        ));
        let consensus_manager = ConsensusManager::new(
            &config,
            consensus_config,
            registry_service,
            &validator_registry,
            client,
        );

        // This only gets started up once, not on every epoch. (Make call to remove
        // every epoch.)
        let consensus_store_pruner = ConsensusStorePruner::new(
            consensus_manager.get_storage_base_path(),
            consensus_config.db_retention_epochs(),
            consensus_config.db_pruner_period(),
            &validator_registry,
        );

        let checkpoint_metrics = CheckpointMetrics::new(&validator_registry);
        let iota_tx_validator_metrics = IotaTxValidatorMetrics::new(&validator_registry);

        let validator_server_handle = Self::start_grpc_validator_service(
            &config,
            state.clone(),
            consensus_adapter.clone(),
            &validator_registry,
        )
        .await?;

        // Starts an overload monitor that monitors the execution of the authority.
        // Don't start the overload monitor when max_load_shedding_percentage is 0.
        let validator_overload_monitor_handle = if config
            .authority_overload_config
            .max_load_shedding_percentage
            > 0
        {
            let authority_state = Arc::downgrade(&state);
            let overload_config = config.authority_overload_config.clone();
            fail_point!("starting_overload_monitor");
            Some(spawn_monitored_task!(overload_monitor(
                authority_state,
                overload_config,
            )))
        } else {
            None
        };

        Self::start_epoch_specific_validator_components(
            &config,
            state.clone(),
            consensus_adapter,
            checkpoint_store,
            epoch_store,
            state_sync_handle,
            randomness_handle,
            consensus_manager,
            consensus_store_pruner,
            accumulator,
            backpressure_manager,
            validator_server_handle,
            validator_overload_monitor_handle,
            checkpoint_metrics,
            iota_node_metrics,
            iota_tx_validator_metrics,
            validator_registry_id,
        )
        .await
    }

    /// Initializes and starts components specific to the current
    /// epoch for the validator node.
    async fn start_epoch_specific_validator_components(
        config: &NodeConfig,
        state: Arc<AuthorityState>,
        consensus_adapter: Arc<ConsensusAdapter>,
        checkpoint_store: Arc<CheckpointStore>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        state_sync_handle: state_sync::Handle,
        randomness_handle: randomness::Handle,
        consensus_manager: ConsensusManager,
        consensus_store_pruner: ConsensusStorePruner,
        accumulator: Weak<StateAccumulator>,
        backpressure_manager: Arc<BackpressureManager>,
        validator_server_handle: SpawnOnce,
        validator_overload_monitor_handle: Option<JoinHandle<()>>,
        checkpoint_metrics: Arc<CheckpointMetrics>,
        iota_node_metrics: Arc<IotaNodeMetrics>,
        iota_tx_validator_metrics: Arc<IotaTxValidatorMetrics>,
        validator_registry_id: RegistryID,
    ) -> Result<ValidatorComponents> {
        let checkpoint_service = Self::build_checkpoint_service(
            config,
            consensus_adapter.clone(),
            checkpoint_store.clone(),
            epoch_store.clone(),
            state.clone(),
            state_sync_handle,
            accumulator,
            checkpoint_metrics.clone(),
        );

1402        // Create a new map that gets injected into both the consensus handler and
1403        // the consensus adapter. The consensus handler will write values forwarded
1404        // from consensus, and the consensus adapter will read those values when
1405        // deciding which validator submits a transaction to consensus.
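        // A sketch of the sharing pattern (illustrative, not the exact call sites):
        // the consensus handler publishes a fresh map with
        //     low_scoring_authorities.store(Arc::new(new_map));
        // while the consensus adapter reads it lock-free via
        //     low_scoring_authorities.load();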
1406        let low_scoring_authorities = Arc::new(ArcSwap::new(Arc::new(HashMap::new())));
1407
1408        consensus_adapter.swap_low_scoring_authorities(low_scoring_authorities.clone());
1409
1410        let randomness_manager = RandomnessManager::try_new(
1411            Arc::downgrade(&epoch_store),
1412            Box::new(consensus_adapter.clone()),
1413            randomness_handle,
1414            config.authority_key_pair(),
1415        )
1416        .await;
1417        if let Some(randomness_manager) = randomness_manager {
1418            epoch_store
1419                .set_randomness_manager(randomness_manager)
1420                .await?;
1421        }
1422
1423        let consensus_handler_initializer = ConsensusHandlerInitializer::new(
1424            state.clone(),
1425            checkpoint_service.clone(),
1426            epoch_store.clone(),
1427            low_scoring_authorities,
1428            backpressure_manager,
1429        );
1430
1431        info!("Starting consensus manager");
1432
1433        consensus_manager
1434            .start(
1435                config,
1436                epoch_store.clone(),
1437                consensus_handler_initializer,
1438                IotaTxValidator::new(
1439                    epoch_store.clone(),
1440                    checkpoint_service.clone(),
1441                    state.transaction_manager().clone(),
1442                    iota_tx_validator_metrics.clone(),
1443                ),
1444            )
1445            .await;
1446
1447        info!("Spawning checkpoint service");
1448        let checkpoint_service_tasks = checkpoint_service.spawn().await;
1449
1450        if epoch_store.authenticator_state_enabled() {
1451            Self::start_jwk_updater(
1452                config,
1453                iota_node_metrics,
1454                state.name,
1455                epoch_store.clone(),
1456                consensus_adapter.clone(),
1457            );
1458        }
1459
1460        Ok(ValidatorComponents {
1461            validator_server_handle,
1462            validator_overload_monitor_handle,
1463            consensus_manager,
1464            consensus_store_pruner,
1465            consensus_adapter,
1466            checkpoint_service_tasks,
1467            checkpoint_metrics,
1468            iota_tx_validator_metrics,
1469            validator_registry_id,
1470        })
1471    }
1472
1473    /// Builds the checkpoint service for the validator node (spawning happens
1474    /// separately via `CheckpointService::spawn`).
1475    /// The service is wired with two outputs: checkpoints to be signed and
1476    /// submitted to consensus (`SubmitCheckpointToConsensus`), and certified
1477    /// checkpoints to be forwarded to state sync
1478    /// (`SendCheckpointToStateSync`).
1479    fn build_checkpoint_service(
1480        config: &NodeConfig,
1481        consensus_adapter: Arc<ConsensusAdapter>,
1482        checkpoint_store: Arc<CheckpointStore>,
1483        epoch_store: Arc<AuthorityPerEpochStore>,
1484        state: Arc<AuthorityState>,
1485        state_sync_handle: state_sync::Handle,
1486        accumulator: Weak<StateAccumulator>,
1487        checkpoint_metrics: Arc<CheckpointMetrics>,
1488    ) -> Arc<CheckpointService> {
1489        let epoch_start_timestamp_ms = epoch_store.epoch_start_state().epoch_start_timestamp_ms();
1490        let epoch_duration_ms = epoch_store.epoch_start_state().epoch_duration_ms();
1491
1492        debug!(
1493            "Starting checkpoint service with epoch start timestamp {} and epoch duration {}",
1494            epoch_start_timestamp_ms, epoch_duration_ms
1495        );
1497
1498        let checkpoint_output = Box::new(SubmitCheckpointToConsensus {
1499            sender: consensus_adapter,
1500            signer: state.secret.clone(),
1501            authority: config.authority_public_key(),
1502            next_reconfiguration_timestamp_ms: epoch_start_timestamp_ms
1503                .checked_add(epoch_duration_ms)
1504                .expect("Overflow calculating next_reconfiguration_timestamp_ms"),
1505            metrics: checkpoint_metrics.clone(),
1506        });
1507
1508        let certified_checkpoint_output = SendCheckpointToStateSync::new(state_sync_handle);
1509        let max_tx_per_checkpoint = max_tx_per_checkpoint(epoch_store.protocol_config());
1510        let max_checkpoint_size_bytes =
1511            epoch_store.protocol_config().max_checkpoint_size_bytes() as usize;
1512
1513        CheckpointService::build(
1514            state.clone(),
1515            checkpoint_store,
1516            epoch_store,
1517            state.get_transaction_cache_reader().clone(),
1518            accumulator,
1519            checkpoint_output,
1520            Box::new(certified_checkpoint_output),
1521            checkpoint_metrics,
1522            max_tx_per_checkpoint,
1523            max_checkpoint_size_bytes,
1524        )
1525    }
1526
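    /// Constructs the [`ConsensusAdapter`] that lets this authority submit user
    /// transactions to consensus.
    ///
    /// A sketch of the per-validator pending-transaction bound computed below,
    /// with illustrative numbers (a node-wide cap of 20_000 and a committee of
    /// 100 members; not the real defaults):
    /// ```ignore
    /// let max_pending_local = 20_000;
    /// // each validator gets roughly twice its fair share of the global budget
    /// let max_pending_per_validator = max_pending_local * 2 / 100; // = 400
    /// ```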
1527    fn construct_consensus_adapter(
1528        committee: &Committee,
1529        consensus_config: &ConsensusConfig,
1530        authority: AuthorityName,
1531        connection_monitor_status: Arc<ConnectionMonitorStatus>,
1532        prometheus_registry: &Registry,
1533        consensus_client: Arc<dyn ConsensusClient>,
1534        checkpoint_store: Arc<CheckpointStore>,
1535    ) -> ConsensusAdapter {
1536        let ca_metrics = ConsensusAdapterMetrics::new(prometheus_registry);
1537        // The consensus adapter allows the authority to send user certificates through
1538        // consensus.
1539
1540        ConsensusAdapter::new(
1541            consensus_client,
1542            checkpoint_store,
1543            authority,
1544            connection_monitor_status,
1545            consensus_config.max_pending_transactions(),
1546            consensus_config.max_pending_transactions() * 2 / committee.num_members(),
1547            consensus_config.max_submit_position,
1548            consensus_config.submit_delay_step_override(),
1549            ca_metrics,
1550        )
1551    }
1552
1553    async fn start_grpc_validator_service(
1554        config: &NodeConfig,
1555        state: Arc<AuthorityState>,
1556        consensus_adapter: Arc<ConsensusAdapter>,
1557        prometheus_registry: &Registry,
1558    ) -> Result<SpawnOnce> {
1559        let validator_service = ValidatorService::new(
1560            state.clone(),
1561            consensus_adapter,
1562            Arc::new(ValidatorServiceMetrics::new(prometheus_registry)),
1563            TrafficControllerMetrics::new(prometheus_registry),
1564            config.policy_config.clone(),
1565            config.firewall_config.clone(),
1566        );
1567
1568        let mut server_conf = iota_network_stack::config::Config::new();
1569        server_conf.global_concurrency_limit = config.grpc_concurrency_limit;
1570        server_conf.load_shed = config.grpc_load_shed;
1571        let server_builder =
1572            ServerBuilder::from_config(&server_conf, GrpcMetrics::new(prometheus_registry))
1573                .add_service(ValidatorServer::new(validator_service));
1574
1575        let tls_config = iota_tls::create_rustls_server_config(
1576            config.network_key_pair().copy().private(),
1577            IOTA_TLS_SERVER_NAME.to_string(),
1578        );
1579
1580        let network_address = config.network_address().clone();
1581
1582        let bind_future = async move {
1583            let server = server_builder
1584                .bind(&network_address, Some(tls_config))
1585                .await
1586                .map_err(|err| anyhow!("Failed to bind to {network_address}: {err}"))?;
1587
1588            let local_addr = server.local_addr();
1589            info!("Listening to traffic on {local_addr}");
1590
1591            Ok(server)
1592        };
1593
1594        Ok(SpawnOnce::new(bind_future))
1595    }
1596
1597    /// Re-executes pending consensus certificates, which may not have been
1598    /// committed to disk before the node restarted. This is necessary for
1599    /// the following reasons:
1600    ///
1601    /// 1. For any transaction for which we returned signed effects to a client,
1602    ///    we must ensure that we have re-executed the transaction before we
1603    ///    begin accepting gRPC requests. Otherwise we would appear to have
1604    ///    forgotten about the transaction.
1605    /// 2. While this is running, we are concurrently waiting for all previously
1606    ///    built checkpoints to be rebuilt. Since there may be dependencies in
1607    ///    either direction (from checkpointed consensus transactions to pending
1608    ///    consensus transactions, or vice versa), we must re-execute pending
1609    ///    consensus transactions to ensure that both processes can complete.
1610    /// 3. Also note that for any pending consensus transactions for which we
1611    ///    wrote a signed effects digest to disk, we must re-execute using that
1612    ///    digest as the expected effects digest, to ensure that we cannot
1613    ///    arrive at different effects than what we previously signed.
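    ///
    /// In short, a sketch of the two paths taken below:
    /// ```ignore
    /// // effects previously signed: re-execute pinned to that digest
    /// state.enqueue_with_expected_effects_digest(signed_certs, epoch_store);
    /// // everything else: simply re-enqueue for execution
    /// state.enqueue_transactions_for_execution(other_certs, epoch_store);
    /// ```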
1614    async fn reexecute_pending_consensus_certs(
1615        epoch_store: &Arc<AuthorityPerEpochStore>,
1616        state: &Arc<AuthorityState>,
1617    ) {
1618        let mut pending_consensus_certificates = Vec::new();
1619        let mut additional_certs = Vec::new();
1620
1621        for tx in epoch_store.get_all_pending_consensus_transactions() {
1622            match tx.kind {
1623                // Shared object txns cannot be re-executed at this point, because we must wait for
1624                // consensus replay to assign shared object versions.
1625                ConsensusTransactionKind::CertifiedTransaction(tx)
1626                    if !tx.contains_shared_object() =>
1627                {
1628                    let tx = *tx;
1629                    // new_unchecked is safe because we never submit a transaction to consensus
1630                    // without verifying it
1631                    let tx = VerifiedExecutableTransaction::new_from_certificate(
1632                        VerifiedCertificate::new_unchecked(tx),
1633                    );
1634                    // we only need to re-execute if we previously signed the effects (which
1635                    // indicates we returned the effects to a client).
1636                    if let Some(fx_digest) = epoch_store
1637                        .get_signed_effects_digest(tx.digest())
1638                        .expect("db error")
1639                    {
1640                        pending_consensus_certificates.push((tx, fx_digest));
1641                    } else {
1642                        additional_certs.push(tx);
1643                    }
1644                }
1645                _ => (),
1646            }
1647        }
1648
1649        let digests = pending_consensus_certificates
1650            .iter()
1651            .map(|(tx, _)| *tx.digest())
1652            .collect::<Vec<_>>();
1653
1654        info!(
1655            "reexecuting {} pending consensus certificates: {:?}",
1656            digests.len(),
1657            digests
1658        );
1659
1660        state.enqueue_with_expected_effects_digest(pending_consensus_certificates, epoch_store);
1661        state.enqueue_transactions_for_execution(additional_certs, epoch_store);
1662
1663        // If this times out, the validator will still almost certainly start up fine.
1664        // However, it may temporarily "forget" about transactions that it had
1665        // previously executed, which could confuse clients in some circumstances.
1666        // The transactions are still in pending_consensus_certificates, though,
1667        // so no finality guarantees are lost.
1668        let timeout = if cfg!(msim) { 120 } else { 60 };
1669        if tokio::time::timeout(
1670            std::time::Duration::from_secs(timeout),
1671            state
1672                .get_transaction_cache_reader()
1673                .try_notify_read_executed_effects_digests(&digests),
1674        )
1675        .await
1676        .is_err()
1677        {
1678            // Log all the digests that were not executed to help debugging.
1679            if let Ok(executed_effects_digests) = state
1680                .get_transaction_cache_reader()
1681                .try_multi_get_executed_effects_digests(&digests)
1682            {
1683                let pending_digests = digests
1684                    .iter()
1685                    .zip(executed_effects_digests.iter())
1686                    .filter_map(|(digest, executed_effects_digest)| {
1687                        if executed_effects_digest.is_none() {
1688                            Some(digest)
1689                        } else {
1690                            None
1691                        }
1692                    })
1693                    .collect::<Vec<_>>();
1694                debug_fatal!(
1695                    "Timed out waiting for effects digests to be executed: {:?}",
1696                    pending_digests
1697                );
1698            } else {
1699                debug_fatal!(
1700                    "Timed out waiting for effects digests to be executed, digests not found"
1701                );
1702            }
1703        }
1704    }
1705
1706    pub fn state(&self) -> Arc<AuthorityState> {
1707        self.state.clone()
1708    }
1709
1710    // Only used for testing because of how the epoch store is loaded.
1711    pub fn reference_gas_price_for_testing(&self) -> Result<u64, anyhow::Error> {
1712        self.state.reference_gas_price_for_testing()
1713    }
1714
1715    pub fn clone_committee_store(&self) -> Arc<CommitteeStore> {
1716        self.state.committee_store().clone()
1717    }
1718
1722
1723    /// Clone an AuthorityAggregator currently used in this node's
1724    /// QuorumDriver, if the node is a fullnode. After reconfig,
1725    /// QuorumDriver builds a new AuthorityAggregator. The caller
1726    /// of this function will most likely want to call it again
1727    /// to get a fresh one.
1728    pub fn clone_authority_aggregator(
1729        &self,
1730    ) -> Option<Arc<AuthorityAggregator<NetworkAuthorityClient>>> {
1731        self.transaction_orchestrator
1732            .as_ref()
1733            .map(|to| to.clone_authority_aggregator())
1734    }
1735
1736    pub fn transaction_orchestrator(
1737        &self,
1738    ) -> Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>> {
1739        self.transaction_orchestrator.clone()
1740    }
1741
1742    pub fn subscribe_to_transaction_orchestrator_effects(
1743        &self,
1744    ) -> Result<tokio::sync::broadcast::Receiver<QuorumDriverEffectsQueueResult>> {
1745        self.transaction_orchestrator
1746            .as_ref()
1747            .map(|to| to.subscribe_to_effects_queue())
1748            .ok_or_else(|| anyhow::anyhow!("Transaction Orchestrator is not enabled in this node."))
1749    }
1750
1751    /// This function awaits the completion of checkpoint execution of the
1752    /// current epoch, after which it initiates reconfiguration of the
1753    /// entire system. It also handles role changes for the node when the epoch
1754    /// changes and advertises capabilities to the committee if the node
1755    /// is a validator.
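    ///
    /// A minimal usage sketch, assuming `node: Arc<IotaNode>` for a started
    /// node (illustrative, not the exact startup API):
    /// ```ignore
    /// let epoch_store = node.state().load_epoch_store_one_call_per_task().clone();
    /// tokio::spawn(node.clone().monitor_reconfiguration(epoch_store));
    /// ```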
1756    pub async fn monitor_reconfiguration(
1757        self: Arc<Self>,
1758        mut epoch_store: Arc<AuthorityPerEpochStore>,
1759    ) -> Result<()> {
1760        let checkpoint_executor_metrics =
1761            CheckpointExecutorMetrics::new(&self.registry_service.default_registry());
1762
1763        loop {
1764            let mut accumulator_guard = self.accumulator.lock().await;
1765            let accumulator = accumulator_guard.take().unwrap();
1766            info!(
1767                "Creating checkpoint executor for epoch {}",
1768                epoch_store.epoch()
1769            );
1770
1771            // If the gRPC server is running, create a closure that forwards checkpoint data to its broadcast channel
1772            let data_sender = if let Ok(guard) = self.grpc_server_handle.try_lock() {
1773                guard.as_ref().map(|handle| {
1774                    let tx = handle.checkpoint_data_broadcaster().clone();
1775                    Box::new(move |data: &CheckpointData| {
1776                        tx.send_traced(data);
1777                    }) as Box<dyn Fn(&CheckpointData) + Send + Sync>
1778                })
1779            } else {
1780                None
1781            };
1782
1783            let checkpoint_executor = CheckpointExecutor::new(
1784                epoch_store.clone(),
1785                self.checkpoint_store.clone(),
1786                self.state.clone(),
1787                accumulator.clone(),
1788                self.backpressure_manager.clone(),
1789                self.config.checkpoint_executor_config.clone(),
1790                checkpoint_executor_metrics.clone(),
1791                data_sender,
1792            );
1793
1794            let run_with_range = self.config.run_with_range;
1795
1796            let cur_epoch_store = self.state.load_epoch_store_one_call_per_task();
1797
1798            // Advertise capabilities to committee, if we are a validator.
1799            if let Some(components) = &*self.validator_components.lock().await {
1800                // TODO: without this sleep, the consensus message is not delivered reliably.
1801                tokio::time::sleep(Duration::from_millis(1)).await;
1802
1803                let config = cur_epoch_store.protocol_config();
1804                let binary_config = to_binary_config(config);
1805                let transaction = ConsensusTransaction::new_capability_notification_v1(
1806                    AuthorityCapabilitiesV1::new(
1807                        self.state.name,
1808                        cur_epoch_store.get_chain_identifier().chain(),
1809                        self.config
1810                            .supported_protocol_versions
1811                            .expect("Supported versions should be populated")
1812                            // no need to send digests of versions less than the current version
1813                            .truncate_below(config.version),
1814                        self.state
1815                            .get_available_system_packages(&binary_config)
1816                            .await,
1817                    ),
1818                );
1819                info!(?transaction, "submitting capabilities to consensus");
1820                components
1821                    .consensus_adapter
1822                    .submit(transaction, None, &cur_epoch_store)?;
1823            } else if self.state.is_active_validator(&cur_epoch_store)
1824                && cur_epoch_store
1825                    .protocol_config()
1826                    .track_non_committee_eligible_validators()
1827            {
1828                // If we are a non-committee validator, send signed capabilities to the
1829                // committee validators in a separate task so as not to block the caller.
1830                // Sending happens only if the supporting feature flag is enabled.
1831                let epoch_store = cur_epoch_store.clone();
1832                let node_clone = self.clone();
1833                spawn_monitored_task!(epoch_store.clone().within_alive_epoch(async move {
1834                    node_clone
1835                        .send_signed_capability_notification_to_committee_with_retry(&epoch_store)
1836                        .instrument(trace_span!(
1837                            "send_signed_capability_notification_to_committee_with_retry"
1838                        ))
1839                        .await;
1840                }));
1841            }
1842
1843            let stop_condition = checkpoint_executor.run_epoch(run_with_range).await;
1844
1845            if stop_condition == StopReason::RunWithRangeCondition {
1846                IotaNode::shutdown(&self).await;
1847                self.shutdown_channel_tx
1848                    .send(run_with_range)
1849                    .expect("RunWithRangeCondition met but failed to send shutdown message");
1850                return Ok(());
1851            }
1852
1853            // Safe to call because we are in the middle of reconfiguration.
1854            let latest_system_state = self
1855                .state
1856                .get_object_cache_reader()
1857                .try_get_iota_system_state_object_unsafe()
1858                .expect("Read IOTA System State object cannot fail");
1859
1860            #[cfg(msim)]
1861            if !self
1862                .sim_state
1863                .sim_safe_mode_expected
1864                .load(Ordering::Relaxed)
1865            {
1866                debug_assert!(!latest_system_state.safe_mode());
1867            }
1868
1869            #[cfg(not(msim))]
1870            debug_assert!(!latest_system_state.safe_mode());
1871
1872            if let Err(err) = self.end_of_epoch_channel.send(latest_system_state.clone()) {
1873                if self.state.is_fullnode(&cur_epoch_store) {
1874                    warn!(
1875                        "Failed to send end of epoch notification to subscriber: {:?}",
1876                        err
1877                    );
1878                }
1879            }
1880
1881            cur_epoch_store.record_is_safe_mode_metric(latest_system_state.safe_mode());
1882            let new_epoch_start_state = latest_system_state.into_epoch_start_state();
1883
1884            self.auth_agg.store(Arc::new(
1885                self.auth_agg
1886                    .load()
1887                    .recreate_with_new_epoch_start_state(&new_epoch_start_state),
1888            ));
1889
1890            let next_epoch_committee = new_epoch_start_state.get_iota_committee();
1891            let next_epoch = next_epoch_committee.epoch();
1892            assert_eq!(cur_epoch_store.epoch() + 1, next_epoch);
1893
1894            info!(
1895                next_epoch,
1896                "Finished executing all checkpoints in epoch. About to reconfigure the system."
1897            );
1898
1899            fail_point_async!("reconfig_delay");
1900
1901            // We save the connection monitor status map regardless of validator / fullnode
1902            // status so that we don't need to restart the connection monitor
1903            // every epoch. Update the mappings that will be used by the
1904            // consensus adapter if it exists or is about to be created.
1905            let authority_names_to_peer_ids =
1906                new_epoch_start_state.get_authority_names_to_peer_ids();
1907            self.connection_monitor_status
1908                .update_mapping_for_epoch(authority_names_to_peer_ids);
1909
1910            cur_epoch_store.record_epoch_reconfig_start_time_metric();
1911
1912            send_trusted_peer_change(
1913                &self.config,
1914                &self.trusted_peer_change_tx,
1915                &new_epoch_start_state,
1916            );
1917
1918            let mut validator_components_lock_guard = self.validator_components.lock().await;
1919
1920            // The following code handles 4 different cases, depending on whether the node
1921            // was a validator in the previous epoch, and whether the node is a validator
1922            // in the new epoch.
1923            let new_epoch_store = self
1924                .reconfigure_state(
1925                    &self.state,
1926                    &cur_epoch_store,
1927                    next_epoch_committee.clone(),
1928                    new_epoch_start_state,
1929                    accumulator.clone(),
1930                )
1931                .await?;
1932
1933            let new_validator_components = if let Some(ValidatorComponents {
1934                validator_server_handle,
1935                validator_overload_monitor_handle,
1936                consensus_manager,
1937                consensus_store_pruner,
1938                consensus_adapter,
1939                mut checkpoint_service_tasks,
1940                checkpoint_metrics,
1941                iota_tx_validator_metrics,
1942                validator_registry_id,
1943            }) = validator_components_lock_guard.take()
1944            {
1945                info!("Reconfiguring the validator.");
1946                // Cancel the old checkpoint service tasks.
1947                // Waiting for the checkpoint builder to finish gracefully is not possible,
1948                // because it may be waiting on transactions while consensus on peers has
1949                // already shut down.
1950                checkpoint_service_tasks.abort_all();
1951                while let Some(result) = checkpoint_service_tasks.join_next().await {
1952                    if let Err(err) = result {
1953                        if err.is_panic() {
1954                            std::panic::resume_unwind(err.into_panic());
1955                        }
1956                        warn!("Error in checkpoint service task: {:?}", err);
1957                    }
1958                }
1959                info!("Checkpoint service has shut down.");
1960
1961                consensus_manager.shutdown().await;
1962                info!("Consensus has shut down.");
1963
1964                info!("Epoch store finished reconfiguration.");
1965
1966                // No other components should be holding a strong reference to state accumulator
1967                // at this point. Confirm here before we swap in the new accumulator.
1968                let accumulator_metrics = Arc::into_inner(accumulator)
1969                    .expect("Accumulator should have no other references at this point")
1970                    .metrics();
1971                let new_accumulator = Arc::new(StateAccumulator::new(
1972                    self.state.get_accumulator_store().clone(),
1973                    accumulator_metrics,
1974                ));
1975                let weak_accumulator = Arc::downgrade(&new_accumulator);
1976                *accumulator_guard = Some(new_accumulator);
1977
1978                consensus_store_pruner.prune(next_epoch).await;
1979
1980                if self.state.is_committee_validator(&new_epoch_store) {
1981                    // Only restart consensus if this node is still a validator in the new epoch.
1982                    Some(
1983                        Self::start_epoch_specific_validator_components(
1984                            &self.config,
1985                            self.state.clone(),
1986                            consensus_adapter,
1987                            self.checkpoint_store.clone(),
1988                            new_epoch_store.clone(),
1989                            self.state_sync_handle.clone(),
1990                            self.randomness_handle.clone(),
1991                            consensus_manager,
1992                            consensus_store_pruner,
1993                            weak_accumulator,
1994                            self.backpressure_manager.clone(),
1995                            validator_server_handle,
1996                            validator_overload_monitor_handle,
1997                            checkpoint_metrics,
1998                            self.metrics.clone(),
1999                            iota_tx_validator_metrics,
2000                            validator_registry_id,
2001                        )
2002                        .await?,
2003                    )
2004                } else {
2005                    info!("This node is no longer a validator after reconfiguration");
2006                    if self.registry_service.remove(validator_registry_id) {
2007                        debug!("Removed validator metrics registry");
2008                    } else {
2009                        warn!("Failed to remove validator metrics registry");
2010                    }
2011                    validator_server_handle.shutdown();
2012                    debug!("Validator grpc server shutdown triggered");
2013
2014                    None
2015                }
2016            } else {
2017                // No other components should be holding a strong reference to state accumulator
2018                // at this point. Confirm here before we swap in the new accumulator.
2019                let accumulator_metrics = Arc::into_inner(accumulator)
2020                    .expect("Accumulator should have no other references at this point")
2021                    .metrics();
2022                let new_accumulator = Arc::new(StateAccumulator::new(
2023                    self.state.get_accumulator_store().clone(),
2024                    accumulator_metrics,
2025                ));
2026                let weak_accumulator = Arc::downgrade(&new_accumulator);
2027                *accumulator_guard = Some(new_accumulator);
2028
2029                if self.state.is_committee_validator(&new_epoch_store) {
2030                    info!("Promoting the node from fullnode to validator, starting grpc server");
2031
2032                    let mut components = Self::construct_validator_components(
2033                        self.config.clone(),
2034                        self.state.clone(),
2035                        Arc::new(next_epoch_committee.clone()),
2036                        new_epoch_store.clone(),
2037                        self.checkpoint_store.clone(),
2038                        self.state_sync_handle.clone(),
2039                        self.randomness_handle.clone(),
2040                        weak_accumulator,
2041                        self.backpressure_manager.clone(),
2042                        self.connection_monitor_status.clone(),
2043                        &self.registry_service,
2044                        self.metrics.clone(),
2045                    )
2046                    .await?;
2047
2048                    components.validator_server_handle =
2049                        components.validator_server_handle.start().await;
2050
2051                    Some(components)
2052                } else {
2053                    None
2054                }
2055            };
2056            *validator_components_lock_guard = new_validator_components;
2057
2058            // Force releasing current epoch store DB handle, because the
2059            // Arc<AuthorityPerEpochStore> may linger.
2060            cur_epoch_store.release_db_handles();
2061
2062            if cfg!(msim)
2063                && !matches!(
2064                    self.config
2065                        .authority_store_pruning_config
2066                        .num_epochs_to_retain_for_checkpoints(),
2067                    None | Some(u64::MAX) | Some(0)
2068                )
2069            {
2070                self.state
2071                    .prune_checkpoints_for_eligible_epochs_for_testing(
2072                        self.config.clone(),
2073                        iota_core::authority::authority_store_pruner::AuthorityStorePruningMetrics::new_for_test(),
2074                    )
2075                    .await?;
2076            }
2077
2078            epoch_store = new_epoch_store;
2079            info!("Reconfiguration finished");
2080        }
2081    }
2082
2083    async fn shutdown(&self) {
2084        if let Some(validator_components) = &*self.validator_components.lock().await {
2085            validator_components.consensus_manager.shutdown().await;
2086        }
2087
2088        // Shutdown the gRPC server if it's running
2089        if let Some(grpc_handle) = self.grpc_server_handle.lock().await.take() {
2090            info!("Shutting down gRPC server");
2091            if let Err(e) = grpc_handle.shutdown().await {
2092                warn!("Failed to gracefully shutdown gRPC server: {e}");
2093            }
2094        }
2095    }
2096
2097    /// Asynchronously reconfigures the state of the authority node for the next
2098    /// epoch.
2099    async fn reconfigure_state(
2100        &self,
2101        state: &Arc<AuthorityState>,
2102        cur_epoch_store: &AuthorityPerEpochStore,
2103        next_epoch_committee: Committee,
2104        next_epoch_start_system_state: EpochStartSystemState,
2105        accumulator: Arc<StateAccumulator>,
2106    ) -> IotaResult<Arc<AuthorityPerEpochStore>> {
2107        let next_epoch = next_epoch_committee.epoch();
2108
2109        let last_checkpoint = self
2110            .checkpoint_store
2111            .get_epoch_last_checkpoint(cur_epoch_store.epoch())
2112            .expect("Error loading last checkpoint for current epoch")
2113            .expect("Could not load last checkpoint for current epoch");
2114        let epoch_supply_change = last_checkpoint
2115            .end_of_epoch_data
2116            .as_ref()
2117            .ok_or_else(|| {
2118                IotaError::from("last checkpoint in epoch should contain end of epoch data")
2119            })?
2120            .epoch_supply_change;
2121
2122        let last_checkpoint_seq = *last_checkpoint.sequence_number();
2123
2124        assert_eq!(
2125            Some(last_checkpoint_seq),
2126            self.checkpoint_store
2127                .get_highest_executed_checkpoint_seq_number()
2128                .expect("Error loading highest executed checkpoint sequence number")
2129        );
2130
2131        let epoch_start_configuration = EpochStartConfiguration::new(
2132            next_epoch_start_system_state,
2133            *last_checkpoint.digest(),
2134            state.get_object_store().as_ref(),
2135            EpochFlag::default_flags_for_new_epoch(&state.config),
2136        )
2137        .expect("EpochStartConfiguration construction cannot fail");
2138
2139        let new_epoch_store = self
2140            .state
2141            .reconfigure(
2142                cur_epoch_store,
2143                self.config.supported_protocol_versions.unwrap(),
2144                next_epoch_committee,
2145                epoch_start_configuration,
2146                accumulator,
2147                &self.config.expensive_safety_check_config,
2148                epoch_supply_change,
2149                last_checkpoint_seq,
2150            )
2151            .await
2152            .expect("Reconfigure authority state cannot fail");
2153        info!(next_epoch, "Node State has been reconfigured");
2154        assert_eq!(next_epoch, new_epoch_store.epoch());
2155        self.state.get_reconfig_api().update_epoch_flags_metrics(
2156            cur_epoch_store.epoch_start_config().flags(),
2157            new_epoch_store.epoch_start_config().flags(),
2158        );
2159
2160        Ok(new_epoch_store)
2161    }
2162
2163    pub fn get_config(&self) -> &NodeConfig {
2164        &self.config
2165    }
2166
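    /// Executes the given transaction immediately, wrapping it in a
    /// `VerifiedExecutableTransaction` with a `CertificateProof::Checkpoint(0, 0)`
    /// proof, i.e. as if it were checkpointed at sequence 0 of epoch 0. Only
    /// meaningful at epoch zero (as the name suggests), where no certificate
    /// exists yet.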
2167    async fn execute_transaction_immediately_at_zero_epoch(
2168        state: &Arc<AuthorityState>,
2169        epoch_store: &Arc<AuthorityPerEpochStore>,
2170        tx: &Transaction,
2171        span: tracing::Span,
2172    ) {
2173        let _guard = span.enter();
2174        let transaction =
2175            iota_types::executable_transaction::VerifiedExecutableTransaction::new_unchecked(
2176                iota_types::executable_transaction::ExecutableTransaction::new_from_data_and_sig(
2177                    tx.data().clone(),
2178                    iota_types::executable_transaction::CertificateProof::Checkpoint(0, 0),
2179                ),
2180            );
2181        state
2182            .try_execute_immediately(&transaction, None, epoch_store)
2183            .unwrap();
2184    }
2185
2186    pub fn randomness_handle(&self) -> randomness::Handle {
2187        self.randomness_handle.clone()
2188    }
2189
2190    /// Sends a signed capability notification to the committee validators on
2191    /// behalf of a non-committee validator. This method implements retry logic
2192    /// to handle failed attempts: it retries with an increasing interval until
2193    /// it receives a successful response from f+1 committee members or
2194    /// accumulates 2f+1 non-retryable errors.
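    ///
    /// The retry interval grows linearly and is capped: 5s, 10s, 15s, ... up to
    /// 300s. A sketch of the schedule used below:
    /// ```ignore
    /// let next = std::cmp::min(
    ///     retry_interval + Duration::from_secs(5),
    ///     Duration::from_secs(300),
    /// );
    /// ```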
2195    async fn send_signed_capability_notification_to_committee_with_retry(
2196        &self,
2197        epoch_store: &Arc<AuthorityPerEpochStore>,
2198    ) {
2199        const INITIAL_RETRY_INTERVAL_SECS: u64 = 5;
2200        const RETRY_INTERVAL_INCREMENT_SECS: u64 = 5;
2201        const MAX_RETRY_INTERVAL_SECS: u64 = 300; // 5 minutes
2202
2203        // Gather the protocol and binary configs needed for the notification
2204        let config = epoch_store.protocol_config();
2205        let binary_config = to_binary_config(config);
2206
2207        // Build the capability notification once; it is reused across retries
2208        let capabilities = AuthorityCapabilitiesV1::new(
2209            self.state.name,
2210            epoch_store.get_chain_identifier().chain(),
2211            self.config
2212                .supported_protocol_versions
2213                .expect("Supported versions should be populated")
2214                .truncate_below(config.version),
2215            self.state
2216                .get_available_system_packages(&binary_config)
2217                .await,
2218        );
2219
2220        // Sign the capabilities using the authority key pair from config
2221        let signature = AuthoritySignature::new_secure(
2222            &IntentMessage::new(
2223                Intent::iota_app(IntentScope::AuthorityCapabilities),
2224                &capabilities,
2225            ),
2226            &epoch_store.epoch(),
2227            self.config.authority_key_pair(),
2228        );
2229
2230        let request = HandleCapabilityNotificationRequestV1 {
2231            message: SignedAuthorityCapabilitiesV1::new_from_data_and_sig(capabilities, signature),
2232        };
2233
2234        let mut retry_interval = Duration::from_secs(INITIAL_RETRY_INTERVAL_SECS);
2235
2236        loop {
2237            let auth_agg = self.auth_agg.load();
2238            match auth_agg
2239                .send_capability_notification_to_quorum(request.clone())
2240                .await
2241            {
2242                Ok(_) => {
2243                    info!("Successfully sent capability notification to committee");
2244                    break;
2245                }
2246                Err(err) => {
2247                    match &err {
2248                        AggregatorSendCapabilityNotificationError::RetryableNotification {
2249                            errors,
2250                        } => {
2251                            warn!(
2252                                "Failed to send capability notification to committee (retryable error), will retry in {:?}: {:?}",
2253                                retry_interval, errors
2254                            );
2255                        }
2256                        AggregatorSendCapabilityNotificationError::NonRetryableNotification {
2257                            errors,
2258                        } => {
2259                            error!(
2260                                "Failed to send capability notification to committee (non-retryable error): {:?}",
2261                                errors
2262                            );
2263                            break;
2264                        }
2265                    };
2266
2267                    // Wait before retrying
2268                    tokio::time::sleep(retry_interval).await;
2269
2270                    // Increase retry interval for the next attempt, capped at max
2271                    retry_interval = std::cmp::min(
2272                        retry_interval + Duration::from_secs(RETRY_INTERVAL_INCREMENT_SECS),
2273                        Duration::from_secs(MAX_RETRY_INTERVAL_SECS),
2274                    );
2275                }
2276            }
2277        }
2278    }
2279}
2280
2281#[cfg(not(msim))]
2282impl IotaNode {
2283    async fn fetch_jwks(
2284        _authority: AuthorityName,
2285        provider: &OIDCProvider,
2286    ) -> IotaResult<Vec<(JwkId, JWK)>> {
2287        use fastcrypto_zkp::bn254::zk_login::fetch_jwks;
2288        let client = reqwest::Client::new();
2289        fetch_jwks(provider, &client)
2290            .await
2291            .map_err(|_| IotaError::JWKRetrieval)
2292    }
2293}
2294
2295#[cfg(msim)]
2296impl IotaNode {
2297    pub fn get_sim_node_id(&self) -> iota_simulator::task::NodeId {
2298        self.sim_state.sim_node.id()
2299    }
2300
2301    pub fn set_safe_mode_expected(&self, new_value: bool) {
2302        info!("Setting safe mode expected to {}", new_value);
2303        self.sim_state
2304            .sim_safe_mode_expected
2305            .store(new_value, Ordering::Relaxed);
2306    }
2307
2308    async fn fetch_jwks(
2309        authority: AuthorityName,
2310        provider: &OIDCProvider,
2311    ) -> IotaResult<Vec<(JwkId, JWK)>> {
2312        get_jwk_injector()(authority, provider)
2313    }
2314}
2315
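/// A gRPC server future that can be started at most once: `Unstarted` holds
/// the bind future, `Started` holds the handle of the serving task.
///
/// A sketch of the intended lifecycle (illustrative):
/// ```ignore
/// let server = SpawnOnce::new(bind_future); // Unstarted
/// let server = server.start().await;        // Started, serving in a task
/// server.shutdown();                        // triggers graceful shutdown
/// ```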
2316enum SpawnOnce {
2317    // Mutex is only needed to make SpawnOnce Sync
2318    Unstarted(Mutex<BoxFuture<'static, Result<iota_network_stack::server::Server>>>),
2319    #[allow(unused)]
2320    Started(iota_http::ServerHandle),
2321}
2322
2323impl SpawnOnce {
2324    pub fn new(
2325        future: impl Future<Output = Result<iota_network_stack::server::Server>> + Send + 'static,
2326    ) -> Self {
2327        Self::Unstarted(Mutex::new(Box::pin(future)))
2328    }
2329
2330    pub async fn start(self) -> Self {
2331        match self {
2332            Self::Unstarted(future) => {
2333                let server = future
2334                    .into_inner()
2335                    .await
2336                    .unwrap_or_else(|err| panic!("Failed to start validator gRPC server: {err}"));
2337                let handle = server.handle().clone();
2338                tokio::spawn(async move {
2339                    match server.serve().await {
2340                        Err(err) => info!("Server stopped with error: {err}"),
2341                        _ => info!("Server stopped"),
2342                    }
2343                });
2344                Self::Started(handle)
2345            }
2346            Self::Started(_) => self,
2347        }
2348    }
2349
2350    pub fn shutdown(self) {
2351        if let SpawnOnce::Started(handle) = self {
2352            handle.trigger_shutdown();
2353        }
2354    }
2355}
2356
2357/// Notify [`DiscoveryEventLoop`] that a new list of trusted peers is now
2358/// available.
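///
/// The previous `new_committee` becomes `old_committee` in the event, so
/// subscribers can diff the two lists. A sketch of observing the change,
/// assuming a watch receiver `rx` for [`TrustedPeerChangeEvent`]:
/// ```ignore
/// let event = rx.borrow_and_update().clone();
/// // event.old_committee: trusted peers of the previous epoch
/// // event.new_committee: trusted peers of the new epoch
/// ```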
2359fn send_trusted_peer_change(
2360    config: &NodeConfig,
2361    sender: &watch::Sender<TrustedPeerChangeEvent>,
2362    new_epoch_start_state: &EpochStartSystemState,
2363) {
2364    let new_committee =
2365        new_epoch_start_state.get_validator_as_p2p_peers(config.authority_public_key());
2366
2367    sender.send_modify(|event| {
2368        core::mem::swap(&mut event.new_committee, &mut event.old_committee);
2369        event.new_committee = new_committee;
2370    })
2371}
2372
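/// Builds the [`TransactionKeyValueStore`] used by the JSON-RPC APIs: the
/// local RocksDB store is always used, and an HTTP key-value store is added as
/// a fallback for local misses when
/// `transaction_kv_store_read_config.base_url` is set.
///
/// A sketch of the two resulting configurations (mirroring the code below):
/// ```ignore
/// // base_url empty -> local db only
/// // base_url set   -> FallbackTransactionKVStore::new_kv(db_store, http_store, ...)
/// ```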
2373fn build_kv_store(
2374    state: &Arc<AuthorityState>,
2375    config: &NodeConfig,
2376    registry: &Registry,
2377) -> Result<Arc<TransactionKeyValueStore>> {
2378    let metrics = KeyValueStoreMetrics::new(registry);
2379    let db_store = TransactionKeyValueStore::new("rocksdb", metrics.clone(), state.clone());
2380
2381    let base_url = &config.transaction_kv_store_read_config.base_url;
2382
2383    if base_url.is_empty() {
2384        info!("no http kv store url provided, using local db only");
2385        return Ok(Arc::new(db_store));
2386    }
2387
2388    base_url.parse::<url::Url>().tap_err(|e| {
2389        error!(
2390            "failed to parse config.transaction_kv_store_config.base_url ({:?}) as url: {}",
2391            base_url, e
2392        )
2393    })?;
2394
2395    let http_store = HttpKVStore::new_kv(
2396        base_url,
2397        config.transaction_kv_store_read_config.cache_size,
2398        metrics.clone(),
2399    )?;
2400    info!("using local key-value store with fallback to http key-value store");
2401    Ok(Arc::new(FallbackTransactionKVStore::new_kv(
2402        db_store,
2403        http_store,
2404        metrics,
2405        "json_rpc_fallback",
2406    )))
2407}
2408
2409/// Builds and starts the gRPC server for the IOTA node based on the node's
2410/// configuration.
2411///
2412/// This function performs the following tasks:
2413/// 1. Checks if the node is a validator by inspecting the consensus
2414///    configuration; if so, it returns early as validators do not expose gRPC
2415///    APIs.
2416/// 2. Checks if gRPC is enabled in the configuration.
2417/// 3. Creates broadcast channels for checkpoint streaming.
2418/// 4. Initializes the gRPC checkpoint service.
2419/// 5. Spawns the gRPC server to listen for incoming connections.
2420///
2421/// Returns a handle to the running gRPC server, or `None` if the node is a
2422/// validator or the gRPC API is disabled.
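///
/// A minimal call sketch (illustrative; `executor` may be `None`):
/// ```ignore
/// if let Some(handle) = build_grpc_server(&config, state, store, executor).await? {
///     // gRPC server is running; keep the handle to shut it down later
/// }
/// ```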
2423async fn build_grpc_server(
2424    config: &NodeConfig,
2425    state: Arc<AuthorityState>,
2426    state_sync_store: RocksDbStore,
2427    executor: Option<Arc<dyn iota_types::transaction_executor::TransactionExecutor>>,
2428) -> Result<Option<GrpcServerHandle>> {
2429    // Validators do not expose gRPC APIs
2430    if config.consensus_config().is_some() || !config.enable_grpc_api {
2431        return Ok(None);
2432    }
2433
2434    let Some(grpc_config) = &config.grpc_api_config else {
2435        return Err(anyhow!("gRPC API is enabled but no configuration provided"));
2436    };
2437
2438    // Get chain identifier from state directly
2439    let chain_id = state.get_chain_identifier();
2440
2441    let rest_read_store = Arc::new(RestReadStore::new(state.clone(), state_sync_store));
2442
2443    // Create cancellation token for proper shutdown hierarchy
2444    let shutdown_token = CancellationToken::new();
2445
2446    // Create GrpcReader
2447    let grpc_reader = Arc::new(GrpcReader::from_rest_state_reader(
2448        rest_read_store,
2449        Some(env!("CARGO_PKG_VERSION").to_string()),
2450    ));
2451
2452    // Pass the cancellation token to the gRPC server so it participates in the
2453    // node's shutdown hierarchy
2454    let handle = start_grpc_server(
2455        grpc_reader,
2456        executor,
2457        grpc_config.clone(),
2458        shutdown_token,
2459        chain_id,
2460    )
2461    .await?;
2462
2463    Ok(Some(handle))
2464}
2465
2466/// Builds and starts the HTTP server for the IOTA node, exposing JSON-RPC and
2467/// REST APIs based on the node's configuration.
2468///
2469/// This function performs the following tasks:
2470/// 1. Checks if the node is a validator by inspecting the consensus
2471///    configuration; if so, it returns early as validators do not expose these
2472///    APIs.
2473/// 2. Creates an Axum router to handle HTTP requests.
2474/// 3. Initializes the JSON-RPC server and registers various RPC modules based
2475///    on the node's state and configuration, including CoinApi,
2476///    TransactionBuilderApi, GovernanceApi, TransactionExecutionApi, and
2477///    IndexerApi.
2478/// 4. Optionally, if the REST API is enabled, nests the REST API router under
2479///    the `/api/v1` path.
2480/// 5. Binds the server to the specified JSON-RPC address and starts listening
2481///    for incoming connections.
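///
/// A minimal call sketch (illustrative values):
/// ```ignore
/// let handle = build_http_server(
///     state, store, &transaction_orchestrator, &config, &registry,
///     None, "0.1.0",
/// )
/// .await?;
/// if let Some(handle) = handle {
///     info!("JSON-RPC listening on {}", handle.local_addr());
/// }
/// ```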
2482pub async fn build_http_server(
2483    state: Arc<AuthorityState>,
2484    store: RocksDbStore,
2485    transaction_orchestrator: &Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>>,
2486    config: &NodeConfig,
2487    prometheus_registry: &Registry,
2488    _custom_runtime: Option<Handle>,
2489    software_version: &'static str,
2490) -> Result<Option<iota_http::ServerHandle>> {
2491    // Validators do not expose these APIs
2492    if config.consensus_config().is_some() {
2493        return Ok(None);
2494    }
2495
2496    let mut router = axum::Router::new();
2497
2498    let json_rpc_router = {
2499        let mut server = JsonRpcServerBuilder::new(
2500            env!("CARGO_PKG_VERSION"),
2501            prometheus_registry,
2502            config.policy_config.clone(),
2503            config.firewall_config.clone(),
2504        );
2505
2506        let kv_store = build_kv_store(&state, config, prometheus_registry)?;
2507
2508        let metrics = Arc::new(JsonRpcMetrics::new(prometheus_registry));
2509        server.register_module(ReadApi::new(
2510            state.clone(),
2511            kv_store.clone(),
2512            metrics.clone(),
2513        ))?;
2514        server.register_module(CoinReadApi::new(
2515            state.clone(),
2516            kv_store.clone(),
2517            metrics.clone(),
2518        )?)?;
2519
2520        // If run_with_range is set, we want to prevent any transaction submission;
2521        // run_with_range = None is the normal operating condition.
2522        if config.run_with_range.is_none() {
2523            server.register_module(TransactionBuilderApi::new(state.clone()))?;
2524        }
2525        server.register_module(GovernanceReadApi::new(state.clone(), metrics.clone()))?;
2526
2527        if let Some(transaction_orchestrator) = transaction_orchestrator {
2528            server.register_module(TransactionExecutionApi::new(
2529                state.clone(),
2530                transaction_orchestrator.clone(),
2531                metrics.clone(),
2532            ))?;
2533        }
2534
2535        let iota_names_config = config
2536            .iota_names_config
2537            .clone()
2538            .unwrap_or_else(|| IotaNamesConfig::from_chain(&state.get_chain_identifier().chain()));
2539
2540        server.register_module(IndexerApi::new(
2541            state.clone(),
2542            ReadApi::new(state.clone(), kv_store.clone(), metrics.clone()),
2543            kv_store,
2544            metrics,
2545            iota_names_config,
2546            config.indexer_max_subscriptions,
2547        ))?;
2548        server.register_module(MoveUtils::new(state.clone()))?;
2549
2550        let server_type = config.jsonrpc_server_type();
2551
2552        server.to_router(server_type).await?
2553    };
2554
2555    router = router.merge(json_rpc_router);
2556
2557    if config.enable_rest_api {
2558        let mut rest_service = iota_rest_api::RestService::new(
2559            Arc::new(RestReadStore::new(state.clone(), store)),
2560            software_version,
2561        );
2562
2563        if let Some(config) = config.rest.clone() {
2564            rest_service.with_config(config);
2565        }
2566
2567        rest_service.with_metrics(RestMetrics::new(prometheus_registry));
2568
2569        if let Some(transaction_orchestrator) = transaction_orchestrator {
2570            rest_service.with_executor(transaction_orchestrator.clone())
2571        }
2572
2573        router = router.merge(rest_service.into_router());
2574    }
2575
2576    // TODO: Remove this health check when experimental REST API becomes default
2577    // This is a copy of the health check in crates/iota-rest-api/src/health.rs
2578    router = router
2579        .route("/health", axum::routing::get(health_check_handler))
2580        .route_layer(axum::Extension(state));
2581
2582    let layers = ServiceBuilder::new()
2583        .map_request(|mut request: axum::http::Request<_>| {
2584            if let Some(connect_info) = request.extensions().get::<iota_http::ConnectInfo>() {
2585                let axum_connect_info = axum::extract::ConnectInfo(connect_info.remote_addr);
2586                request.extensions_mut().insert(axum_connect_info);
2587            }
2588            request
2589        })
2590        .layer(axum::middleware::from_fn(server_timing_middleware));
2591
2592    router = router.layer(layers);
2593
2594    let handle = iota_http::Builder::new()
2595        .serve(&config.json_rpc_address, router)
2596        .map_err(|e| anyhow::anyhow!("{e}"))?;
2597    info!(local_addr =? handle.local_addr(), "IOTA JSON-RPC server listening on {}", handle.local_addr());
2598
2599    Ok(Some(handle))
2600}
2601
2602#[derive(Debug, serde::Serialize, serde::Deserialize)]
2603pub struct Threshold {
2604    pub threshold_seconds: Option<u32>,
2605}
2606
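/// Handles `GET /health`. Without a query parameter it only confirms that the
/// endpoint is responsive; with `?threshold_seconds=N` it additionally fails
/// the check when the highest executed checkpoint is older than `N` seconds.
///
/// A sketch of the two probe styles:
/// ```ignore
/// // liveness only:          GET /health
/// // freshness within 30s:   GET /health?threshold_seconds=30
/// ```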
2607async fn health_check_handler(
2608    axum::extract::Query(Threshold { threshold_seconds }): axum::extract::Query<Threshold>,
2609    axum::Extension(state): axum::Extension<Arc<AuthorityState>>,
2610) -> impl axum::response::IntoResponse {
2611    if let Some(threshold_seconds) = threshold_seconds {
2612        // Attempt to get the latest checkpoint
2613        let summary = match state
2614            .get_checkpoint_store()
2615            .get_highest_executed_checkpoint()
2616        {
2617            Ok(Some(summary)) => summary,
2618            Ok(None) => {
2619                warn!("Highest executed checkpoint not found");
2620                return (axum::http::StatusCode::SERVICE_UNAVAILABLE, "down");
2621            }
2622            Err(err) => {
2623                warn!("Failed to retrieve highest executed checkpoint: {:?}", err);
2624                return (axum::http::StatusCode::SERVICE_UNAVAILABLE, "down");
2625            }
2626        };
2627
2628        // Calculate the threshold time based on the provided threshold_seconds
2629        let latest_chain_time = summary.timestamp();
2630        let threshold =
2631            std::time::SystemTime::now() - Duration::from_secs(threshold_seconds as u64);
2632
2633        // Check if the latest checkpoint is within the threshold
2634        if latest_chain_time < threshold {
2635            warn!(
2636                ?latest_chain_time,
2637                ?threshold,
2638                "failing health check due to checkpoint lag"
2639            );
2640            return (axum::http::StatusCode::SERVICE_UNAVAILABLE, "down");
2641        }
2642    }
2643    // if health endpoint is responding and no threshold is given, respond success
2644    (axum::http::StatusCode::OK, "up")
2645}
2646
2647#[cfg(not(test))]
2648fn max_tx_per_checkpoint(protocol_config: &ProtocolConfig) -> usize {
2649    protocol_config.max_transactions_per_checkpoint() as usize
2650}
2651
2652#[cfg(test)]
2653fn max_tx_per_checkpoint(_: &ProtocolConfig) -> usize {
2654    2
2655}