use std::{collections::BTreeMap, env, path::PathBuf, sync::Arc};

use anyhow::Result;
use clap::*;
use fastcrypto::encoding::Encoding;
use futures::{StreamExt, future::join_all};
use iota_archival::{read_manifest_as_json, write_manifest_from_json};
use iota_config::{
    Config,
    genesis::Genesis,
    object_storage_config::{ObjectStoreConfig, ObjectStoreType},
};
use iota_core::{authority_aggregator::AuthorityAggregatorBuilder, authority_client::AuthorityAPI};
use iota_protocol_config::Chain;
use iota_replay::{ReplayToolCommand, execute_replay_command};
use iota_sdk::{IotaClient, IotaClientBuilder, rpc_types::IotaTransactionBlockResponseOptions};
use iota_types::{
    base_types::*,
    crypto::AuthorityPublicKeyBytes,
    messages_checkpoint::{CheckpointRequest, CheckpointResponse, CheckpointSequenceNumber},
    messages_grpc::TransactionInfoRequest,
    transaction::{SenderSignedData, Transaction},
};
use telemetry_subscribers::TracingHandle;

use crate::{
    ConciseObjectOutput, GroupedObjectOutput, SnapshotVerifyMode, VerboseObjectOutput,
    check_completed_snapshot,
    db_tool::{DbToolCommand, execute_db_tool_command, print_db_all_tables},
    download_db_snapshot, download_formal_snapshot, dump_checkpoints_from_archive,
    get_latest_available_epoch, get_object, get_transaction_block, make_clients,
    restore_from_db_checkpoint, verify_archive, verify_archive_by_checksum,
};

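/// How much detail to print for objects fetched with `FetchObject`.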
#[derive(Parser, Clone, ValueEnum)]
pub enum Verbosity {
    Grouped,
    Concise,
    Verbose,
}

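/// Subcommands of the IOTA debugging and operations tool.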
#[derive(Parser)]
pub enum ToolCommand {
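    /// Check whether an object (or all gas objects owned by an address) is
    /// locked by validators and, with `--rescue`, try to unlock it by
    /// re-executing the transaction holding the lock.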
    LockedObject {
        #[arg(long, help = "The object ID to fetch")]
        id: Option<ObjectID>,
        #[arg(long)]
        address: Option<IotaAddress>,
        #[arg(long)]
        fullnode_rpc_url: String,
        #[arg(long)]
        rescue: bool,
    },

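    /// Fetch an object (optionally at a specific version) from one or all
    /// validators and print it in the requested verbosity.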
    FetchObject {
        #[arg(long, help = "The object ID to fetch")]
        id: ObjectID,

        #[arg(long, help = "Fetch object at a specific sequence")]
        version: Option<u64>,

        #[arg(
            long,
            help = "Validator to fetch from - if not specified, all validators are queried"
        )]
        validator: Option<AuthorityName>,

        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(value_enum, long, default_value = "grouped", ignore_case = true)]
        verbosity: Verbosity,

        #[arg(long, help = "don't show header in concise output")]
        concise_no_header: bool,
    },

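    /// Fetch a transaction block from the fullnode JSON-RPC endpoint and
    /// print it, optionally including the input transaction data.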
    FetchTransaction {
        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(long, help = "The transaction ID to fetch")]
        digest: TransactionDigest,

        #[arg(long = "show-tx")]
        show_input_tx: bool,
    },

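    /// Run a command against a local authority database; with no subcommand,
    /// list all tables in the database.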
    DbTool {
        #[arg(long)]
        db_path: String,
        #[command(subcommand)]
        cmd: Option<DbToolCommand>,
    },

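    /// Verify the archive in the given object store against the genesis,
    /// downloading with the given concurrency.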
    VerifyArchive {
        #[arg(long)]
        genesis: PathBuf,
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 5)]
        download_concurrency: usize,
    },

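    /// Print the archive manifest stored in the given object store as JSON.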
    PrintArchiveManifest {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
    },
    UpdateArchiveManifest {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(long = "archive-path")]
        archive_json_path: PathBuf,
    },
    #[command(name = "verify-archive-from-checksums")]
    VerifyArchiveByChecksum {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 5)]
        download_concurrency: usize,
    },

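    /// Dump checkpoints from the archive for sequence numbers between `start`
    /// and `end`, limiting printed content to `max_content_length`.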
    #[command(name = "dump-archive")]
    DumpArchiveByChecksum {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 0)]
        start: u64,
        end: u64,
        #[arg(default_value_t = 80)]
        max_content_length: usize,
    },

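    /// Download published Move packages from the service at `rpc_url` into
    /// `output_dir`, optionally bounded by `before_checkpoint`.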
    DumpPackages {
        #[arg(long, short)]
        rpc_url: String,

        #[arg(long, short)]
        output_dir: PathBuf,

        #[arg(long)]
        before_checkpoint: Option<u64>,

        #[arg(short, long)]
        verbose: bool,
    },

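    /// Print the validator set recorded in the genesis blob, either in full
    /// or in a concise one-line-per-validator form.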
    DumpValidators {
        #[arg(long)]
        genesis: PathBuf,

        #[arg(
            long,
            help = "show concise output - name, authority key and network address"
        )]
        concise: bool,
    },

    DumpGenesis {
        #[arg(long)]
        genesis: PathBuf,
    },

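    /// Request a checkpoint, optionally at a specific sequence number, from
    /// every validator and print each response.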
    FetchCheckpoint {
        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(long, help = "Fetch checkpoint at a specific sequence number")]
        sequence_number: Option<CheckpointSequenceNumber>,
    },

    Anemo {
        #[command(next_help_heading = "foo", flatten)]
        args: anemo_cli::Args,
    },

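    /// Restore a node database from a database checkpoint directory, using
    /// the node config at `config_path`.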
    #[command(name = "restore-db")]
    RestoreFromDBCheckpoint {
        #[arg(long)]
        config_path: PathBuf,
        #[arg(long)]
        db_checkpoint_path: PathBuf,
    },

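    /// Default buckets and endpoints can be overridden with environment
    /// variables such as MAINNET_DB_SIGNED_BUCKET, TESTNET_DB_UNSIGNED_BUCKET
    /// and AWS_SNAPSHOT_ENDPOINT (see the handler in `execute` below).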
    #[command(
        name = "download-db-snapshot",
        about = "Downloads the legacy database snapshot via cloud object store, outputs to local disk"
    )]
    DownloadDBSnapshot {
        #[arg(long, conflicts_with = "latest")]
        epoch: Option<u64>,
        #[arg(long, help = "the path to write the downloaded snapshot files")]
        path: PathBuf,
        #[arg(long)]
        skip_indexes: bool,
        #[arg(long)]
        num_parallel_downloads: Option<usize>,
        #[arg(long, default_value = "mainnet")]
        network: Chain,
        #[arg(long, conflicts_with = "no_sign_request")]
        snapshot_bucket: Option<String>,
        #[arg(
            long,
            conflicts_with = "no_sign_request",
            help = "Required if --no-sign-request is not set"
        )]
        snapshot_bucket_type: Option<ObjectStoreType>,
        #[arg(long, help = "only used for testing, when --snapshot-bucket-type=FILE")]
        snapshot_path: Option<PathBuf>,
        #[arg(
            long,
            conflicts_with_all = &["snapshot_bucket", "snapshot_bucket_type"],
            help = "if set, no authentication is needed for snapshot restore"
        )]
        no_sign_request: bool,
        #[arg(
            long,
            conflicts_with = "epoch",
            help = "defaults to latest available snapshot in chosen bucket"
        )]
        latest: bool,
        #[arg(long)]
        verbose: bool,
    },

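    /// As with `download-db-snapshot`, buckets and endpoints can be overridden
    /// via environment variables (MAINNET_FORMAL_SIGNED_BUCKET,
    /// AWS_SNAPSHOT_ENDPOINT, FORMAL_SNAPSHOT_ARCHIVE_BUCKET, ...); see the
    /// handler in `execute` below.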
    #[command(
        about = "Downloads formal database snapshot via cloud object store, outputs to local disk"
    )]
    DownloadFormalSnapshot {
        #[arg(long, conflicts_with = "latest")]
        epoch: Option<u64>,
        #[arg(long)]
        genesis: PathBuf,
        #[arg(long)]
        path: PathBuf,
        #[arg(long)]
        num_parallel_downloads: Option<usize>,
        #[arg(long, default_value = "normal")]
        verify: Option<SnapshotVerifyMode>,
        #[arg(long, default_value = "mainnet")]
        network: Chain,
        #[arg(long, conflicts_with = "no_sign_request")]
        snapshot_bucket: Option<String>,
        #[arg(
            long,
            conflicts_with = "no_sign_request",
            help = "Required if --no-sign-request is not set"
        )]
        snapshot_bucket_type: Option<ObjectStoreType>,
        #[arg(long)]
        snapshot_path: Option<PathBuf>,
        #[arg(
            long,
            conflicts_with_all = &["snapshot_bucket", "snapshot_bucket_type"],
            help = "if set, no authentication is needed for snapshot restore"
        )]
        no_sign_request: bool,
        #[arg(
            long,
            conflicts_with = "epoch",
            help = "defaults to latest available snapshot in chosen bucket"
        )]
        latest: bool,
        #[arg(long)]
        verbose: bool,

        #[arg(long)]
        all_checkpoints: bool,
    },

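    /// Replay transactions via the `iota_replay` tooling, either against an
    /// RPC endpoint (`--rpc`) or against the network configured in the
    /// network config file (`--cfg-path`).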
    Replay {
        #[arg(long = "rpc")]
        rpc_url: Option<String>,
        #[arg(long)]
        safety_checks: bool,
        #[arg(long = "authority")]
        use_authority: bool,
        #[arg(
            long,
            short,
            help = "Path to the network config file. This should be specified when rpc_url is not present. \
                    If not specified, we will use the default network config file at ~/.iota-replay/network-config.yaml"
        )]
        cfg_path: Option<PathBuf>,
        #[arg(
            long,
            help = "The name of the chain to replay from, could be one of: mainnet, testnet, devnet. \
                    When rpc_url is not specified, this is used to load the corresponding config from the network config file. \
                    If not specified, mainnet will be used by default"
        )]
        chain: Option<String>,
        #[command(subcommand)]
        cmd: ReplayToolCommand,
    },

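    /// Submit an already sender-signed transaction (Base64-encoded BCS
    /// `SenderSignedData`) to the validator committee derived from the
    /// genesis blob and print the aggregation result.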
    SignTransaction {
        #[arg(long)]
        genesis: PathBuf,

        #[arg(
            long,
            help = "The Base64 encoding of the BCS bytes of SenderSignedData"
        )]
        sender_signed_data: String,
    },
}

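/// Check whether `id` is locked by a quorum of validators and, when `rescue`
/// is set, try to unlock it by re-executing the transaction that holds the
/// lock through the quorum driver.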
async fn check_locked_object(
    iota_client: &Arc<IotaClient>,
    committee: Arc<BTreeMap<AuthorityPublicKeyBytes, u64>>,
    id: ObjectID,
    rescue: bool,
) -> anyhow::Result<()> {
    let clients = Arc::new(make_clients(iota_client).await?);
    let output = get_object(id, None, None, clients.clone()).await?;
    let output = GroupedObjectOutput::new(output, committee);
    if output.fully_locked {
        println!("Object {id} is fully locked.");
        return Ok(());
    }
    let top_record = output.voting_power.first().unwrap();
    let top_record_stake = top_record.1;
    let top_record = top_record.0.unwrap();
    if top_record.4.is_none() {
        println!(
            "Object {id} does not seem to be locked by majority of validators (unlocked stake: {top_record_stake})"
        );
        return Ok(());
    }

    let tx_digest = top_record.2;
    if !rescue {
        println!("Object {id} is rescuable, top tx: {tx_digest:?}");
        return Ok(());
    }
    println!("Object {id} is rescuable, trying tx {tx_digest}");
    let validator = output
        .grouped_results
        .get(&Some(top_record))
        .unwrap()
        .first()
        .unwrap();
    let client = &clients.get(validator).unwrap().1;
    let tx = client
        .handle_transaction_info_request(TransactionInfoRequest {
            transaction_digest: tx_digest,
        })
        .await?
        .transaction;
    let res = iota_client
        .quorum_driver_api()
        .execute_transaction_block(
            Transaction::new(tx),
            IotaTransactionBlockResponseOptions::full_content(),
            None,
        )
        .await;
    match res {
        Ok(_) => {
            println!("Transaction executed successfully ({tx_digest:?})");
        }
        Err(e) => {
            println!("Failed to execute transaction ({tx_digest:?}): {e:?}");
        }
    }
    Ok(())
}

impl ToolCommand {
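    /// Run the selected subcommand. `tracing_handle` is used to silence log
    /// output for commands that take a `--verbose` flag.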
    pub async fn execute(self, tracing_handle: TracingHandle) -> Result<(), anyhow::Error> {
        match self {
            ToolCommand::LockedObject {
                id,
                fullnode_rpc_url,
                rescue,
                address,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let committee = Arc::new(
                    iota_client
                        .governance_api()
                        .get_committee_info(None)
                        .await?
                        .validators
                        .into_iter()
                        .collect::<BTreeMap<_, _>>(),
                );
                let object_ids = match id {
                    Some(id) => vec![id],
                    None => {
                        let address = address.expect("Either id or address must be provided");
                        iota_client
                            .coin_read_api()
                            .get_coins_stream(address, None)
                            .map(|c| c.coin_object_id)
                            .collect()
                            .await
                    }
                };
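                // Check the objects in parallel, 30 at a time, propagating the
                // first error encountered.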
                for ids in object_ids.chunks(30) {
                    let mut tasks = vec![];
                    for id in ids {
                        tasks.push(check_locked_object(
                            &iota_client,
                            committee.clone(),
                            *id,
                            rescue,
                        ))
                    }
                    join_all(tasks)
                        .await
                        .into_iter()
                        .collect::<Result<Vec<_>, _>>()?;
                }
            }
            ToolCommand::FetchObject {
                id,
                validator,
                version,
                fullnode_rpc_url,
                verbosity,
                concise_no_header,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let clients = Arc::new(make_clients(&iota_client).await?);
                let output = get_object(id, version, validator, clients).await?;

                match verbosity {
                    Verbosity::Grouped => {
                        let committee = Arc::new(
                            iota_client
                                .governance_api()
                                .get_committee_info(None)
                                .await?
                                .validators
                                .into_iter()
                                .collect::<BTreeMap<_, _>>(),
                        );
                        println!("{}", GroupedObjectOutput::new(output, committee));
                    }
                    Verbosity::Verbose => {
                        println!("{}", VerboseObjectOutput(output));
                    }
                    Verbosity::Concise => {
                        if !concise_no_header {
                            println!("{}", ConciseObjectOutput::header());
                        }
                        println!("{}", ConciseObjectOutput(output));
                    }
                }
            }
            ToolCommand::FetchTransaction {
                digest,
                show_input_tx,
                fullnode_rpc_url,
            } => {
                print!(
                    "{}",
                    get_transaction_block(digest, show_input_tx, fullnode_rpc_url).await?
                );
            }
            ToolCommand::DbTool { db_path, cmd } => {
                let path = PathBuf::from(db_path);
                match cmd {
                    Some(c) => execute_db_tool_command(path, c).await?,
                    None => print_db_all_tables(path)?,
                }
            }
            ToolCommand::DumpPackages {
                rpc_url,
                output_dir,
                before_checkpoint,
                verbose,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }

                iota_package_dump::dump(rpc_url, output_dir, before_checkpoint).await?;
            }
            ToolCommand::DumpValidators { genesis, concise } => {
                let genesis = Genesis::load(genesis).unwrap();
                if !concise {
                    println!("{:#?}", genesis.validator_set_for_tooling());
                } else {
                    for (i, val_info) in genesis.validator_set_for_tooling().iter().enumerate() {
                        let metadata = val_info.verified_metadata();
                        println!(
                            "#{:<2} {:<20} {:?} {:?} {}",
                            i,
                            metadata.name,
                            metadata.iota_pubkey_bytes().concise(),
                            metadata.net_address,
                            anemo::PeerId(metadata.network_pubkey.0.to_bytes()),
                        )
                    }
                }
            }
            ToolCommand::DumpGenesis { genesis } => {
                let genesis = Genesis::load(genesis)?;
                println!("{genesis:#?}");
            }
            ToolCommand::FetchCheckpoint {
                sequence_number,
                fullnode_rpc_url,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let clients = make_clients(&iota_client).await?;

                for (name, (_, client)) in clients {
                    let resp = client
                        .handle_checkpoint(CheckpointRequest {
                            sequence_number,
                            request_content: true,
                            certified: true,
                        })
                        .await
                        .unwrap();
                    let CheckpointResponse {
                        checkpoint,
                        contents,
                    } = resp;
                    println!("Validator: {:?}\n", name.concise());
                    println!("Checkpoint: {checkpoint:?}\n");
                    println!("Content: {contents:?}\n");
                }
            }
            ToolCommand::Anemo { args } => {
                let config = crate::make_anemo_config();
                anemo_cli::run(config, args).await
            }
            ToolCommand::RestoreFromDBCheckpoint {
                config_path,
                db_checkpoint_path,
            } => {
                let config = iota_config::NodeConfig::load(config_path)?;
                restore_from_db_checkpoint(&config, &db_checkpoint_path).await?;
            }
            ToolCommand::DownloadFormalSnapshot {
                epoch,
                genesis,
                path,
                num_parallel_downloads,
                verify,
                network,
                snapshot_bucket,
                snapshot_bucket_type,
                snapshot_path,
                no_sign_request,
                latest,
                verbose,
                all_checkpoints,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }
                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
                    num_cpus::get()
                        .checked_sub(1)
                        .expect("Failed to get number of CPUs")
                });
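                // Resolve the snapshot bucket: an explicit --snapshot-bucket wins,
                // otherwise fall back to per-network defaults that can be overridden
                // through the *_FORMAL_(UN)SIGNED_BUCKET environment variables.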
                let snapshot_bucket =
                    snapshot_bucket.or_else(|| match (network, no_sign_request) {
                        (Chain::Mainnet, false) => Some(
                            env::var("MAINNET_FORMAL_SIGNED_BUCKET")
                                .unwrap_or("iota-mainnet-formal".to_string()),
                        ),
                        (Chain::Mainnet, true) => env::var("MAINNET_FORMAL_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, true) => env::var("TESTNET_FORMAL_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, _) => Some(
                            env::var("TESTNET_FORMAL_SIGNED_BUCKET")
                                .unwrap_or("iota-testnet-formal".to_string()),
                        ),
                        (Chain::Unknown, _) => {
                            panic!("Cannot generate default snapshot bucket for unknown network");
                        }
                    });

                let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok().or_else(|| {
                    if no_sign_request {
                        if network == Chain::Mainnet {
                            Some("https://formal-snapshot.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://formal-snapshot.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    } else {
                        None
                    }
                });

                let snapshot_bucket_type = if no_sign_request {
                    ObjectStoreType::S3
                } else {
                    snapshot_bucket_type
                        .expect("You must set either --snapshot-bucket-type or --no-sign-request")
                };
                let snapshot_store_config = match snapshot_bucket_type {
                    ObjectStoreType::S3 => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        bucket: snapshot_bucket.filter(|s| !s.is_empty()),
                        aws_access_key_id: env::var("AWS_SNAPSHOT_ACCESS_KEY_ID").ok(),
                        aws_secret_access_key: env::var("AWS_SNAPSHOT_SECRET_ACCESS_KEY").ok(),
                        aws_region: env::var("AWS_SNAPSHOT_REGION").ok(),
                        aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                        aws_virtual_hosted_style_request: env::var(
                            "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                        )
                        .ok()
                        .and_then(|b| b.parse().ok())
                        .unwrap_or(no_sign_request),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::GCS => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::GCS),
                        bucket: snapshot_bucket,
                        google_service_account: env::var("GCS_SNAPSHOT_SERVICE_ACCOUNT_FILE_PATH")
                            .ok(),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::Azure => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::Azure),
                        bucket: snapshot_bucket,
                        azure_storage_account: env::var("AZURE_SNAPSHOT_STORAGE_ACCOUNT").ok(),
                        azure_storage_access_key: env::var("AZURE_SNAPSHOT_STORAGE_ACCESS_KEY")
                            .ok(),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::File => {
                        if snapshot_path.is_some() {
                            ObjectStoreConfig {
                                object_store: Some(ObjectStoreType::File),
                                directory: snapshot_path,
                                ..Default::default()
                            }
                        } else {
                            panic!(
                                "--snapshot-path must be specified for --snapshot-bucket-type=file"
                            );
                        }
                    }
                };

                let archive_bucket = Some(
                    env::var("FORMAL_SNAPSHOT_ARCHIVE_BUCKET").unwrap_or_else(|_| match network {
                        Chain::Mainnet => "iota-mainnet-archive".to_string(),
                        Chain::Testnet => "iota-testnet-archive".to_string(),
                        Chain::Unknown => {
                            panic!("Cannot generate default archive bucket for unknown network");
                        }
                    }),
                );

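                // Use a credentialed custom archive bucket only when
                // CUSTOM_ARCHIVE_BUCKET=true; otherwise fall back to the public
                // per-network archive endpoint with unsigned requests.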
                let mut custom_archive_enabled = false;
                if let Ok(custom_archive_check) = env::var("CUSTOM_ARCHIVE_BUCKET") {
                    if custom_archive_check == "true" {
                        custom_archive_enabled = true;
                    }
                }
                let archive_store_config = if custom_archive_enabled {
                    let aws_region = Some(
                        env::var("FORMAL_SNAPSHOT_ARCHIVE_REGION")
                            .unwrap_or("us-west-2".to_string()),
                    );

                    let archive_bucket_type = env::var("FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE")
                        .expect("If setting `CUSTOM_ARCHIVE_BUCKET=true`, you must also set FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE and credentials");
                    match archive_bucket_type.to_ascii_lowercase().as_str() {
                        "s3" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::S3),
                            bucket: archive_bucket.filter(|s| !s.is_empty()),
                            aws_access_key_id: env::var("AWS_ARCHIVE_ACCESS_KEY_ID").ok(),
                            aws_secret_access_key: env::var("AWS_ARCHIVE_SECRET_ACCESS_KEY").ok(),
                            aws_region,
                            aws_endpoint: env::var("AWS_ARCHIVE_ENDPOINT").ok(),
                            aws_virtual_hosted_style_request: env::var(
                                "AWS_ARCHIVE_VIRTUAL_HOSTED_REQUESTS",
                            )
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(false),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        "gcs" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::GCS),
                            bucket: archive_bucket,
                            google_service_account: env::var(
                                "GCS_ARCHIVE_SERVICE_ACCOUNT_FILE_PATH",
                            )
                            .ok(),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        "azure" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::Azure),
                            bucket: archive_bucket,
                            azure_storage_account: env::var("AZURE_ARCHIVE_STORAGE_ACCOUNT").ok(),
                            azure_storage_access_key: env::var("AZURE_ARCHIVE_STORAGE_ACCESS_KEY")
                                .ok(),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        _ => panic!(
                            "If setting `CUSTOM_ARCHIVE_BUCKET=true`, FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE must be one of 's3', 'gcs', or 'azure'"
                        ),
                    }
                } else {
                    let aws_endpoint = env::var("AWS_ARCHIVE_ENDPOINT").ok().or_else(|| {
                        if network == Chain::Mainnet {
                            Some("https://archive.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://archive.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    });

                    let aws_virtual_hosted_style_request =
                        env::var("AWS_ARCHIVE_VIRTUAL_HOSTED_REQUESTS")
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(matches!(network, Chain::Mainnet | Chain::Testnet));

                    ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        bucket: archive_bucket.filter(|s| !s.is_empty()),
                        aws_region: Some("us-west-2".to_string()),
                        aws_endpoint,
                        aws_virtual_hosted_style_request,
                        object_store_connection_limit: 200,
                        no_sign_request: true,
                        ..Default::default()
                    }
                };
                let latest_available_epoch =
                    latest.then_some(get_latest_available_epoch(&snapshot_store_config).await?);
                let epoch_to_download = epoch.or(latest_available_epoch).expect(
                    "Either pass epoch with --epoch <epoch_num> or use latest with --latest",
                );

                if let Err(e) =
                    check_completed_snapshot(&snapshot_store_config, epoch_to_download).await
                {
                    panic!("Aborting snapshot restore: {e}, snapshot may not be uploaded yet");
                }

                let verify = verify.unwrap_or_default();
                download_formal_snapshot(
                    &path,
                    epoch_to_download,
                    &genesis,
                    snapshot_store_config,
                    archive_store_config,
                    num_parallel_downloads,
                    network,
                    verify,
                    all_checkpoints,
                )
                .await?;
            }
            ToolCommand::DownloadDBSnapshot {
                epoch,
                path,
                skip_indexes,
                num_parallel_downloads,
                network,
                snapshot_bucket,
                snapshot_bucket_type,
                snapshot_path,
                no_sign_request,
                latest,
                verbose,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }
                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
                    num_cpus::get()
                        .checked_sub(1)
                        .expect("Failed to get number of CPUs")
                });
                let snapshot_bucket =
                    snapshot_bucket.or_else(|| match (network, no_sign_request) {
                        (Chain::Mainnet, false) => Some(
                            env::var("MAINNET_DB_SIGNED_BUCKET")
                                .unwrap_or("iota-mainnet-snapshots".to_string()),
                        ),
                        (Chain::Mainnet, true) => env::var("MAINNET_DB_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, true) => env::var("TESTNET_DB_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, _) => Some(
                            env::var("TESTNET_DB_SIGNED_BUCKET")
                                .unwrap_or("iota-testnet-snapshots".to_string()),
                        ),
                        (Chain::Unknown, _) => {
                            panic!("Cannot generate default snapshot bucket for unknown network");
                        }
                    });

                let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok();
                let snapshot_bucket_type = if no_sign_request {
                    ObjectStoreType::S3
                } else {
                    snapshot_bucket_type
                        .expect("You must set either --snapshot-bucket-type or --no-sign-request")
                };
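                // With --no-sign-request the snapshot is fetched anonymously from the
                // public per-network endpoint; otherwise build a credentialed config
                // for the selected bucket type.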
                let snapshot_store_config = if no_sign_request {
                    let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok().or_else(|| {
                        if network == Chain::Mainnet {
                            Some("https://rocksdb-snapshot.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://rocksdb-snapshot.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    });
                    ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                        aws_virtual_hosted_style_request: env::var(
                            "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                        )
                        .ok()
                        .and_then(|b| b.parse().ok())
                        .unwrap_or(no_sign_request),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    }
                } else {
                    match snapshot_bucket_type {
                        ObjectStoreType::S3 => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::S3),
                            bucket: snapshot_bucket.filter(|s| !s.is_empty()),
                            aws_access_key_id: env::var("AWS_SNAPSHOT_ACCESS_KEY_ID").ok(),
                            aws_secret_access_key: env::var("AWS_SNAPSHOT_SECRET_ACCESS_KEY").ok(),
                            aws_region: env::var("AWS_SNAPSHOT_REGION").ok(),
                            aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                            aws_virtual_hosted_style_request: env::var(
                                "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                            )
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(no_sign_request),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::GCS => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::GCS),
                            bucket: snapshot_bucket,
                            google_service_account: env::var(
                                "GCS_SNAPSHOT_SERVICE_ACCOUNT_FILE_PATH",
                            )
                            .ok(),
                            google_project_id: env::var("GCS_SNAPSHOT_SERVICE_ACCOUNT_PROJECT_ID")
                                .ok(),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::Azure => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::Azure),
                            bucket: snapshot_bucket,
                            azure_storage_account: env::var("AZURE_SNAPSHOT_STORAGE_ACCOUNT").ok(),
                            azure_storage_access_key: env::var("AZURE_SNAPSHOT_STORAGE_ACCESS_KEY")
                                .ok(),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::File => {
                            if snapshot_path.is_some() {
                                ObjectStoreConfig {
                                    object_store: Some(ObjectStoreType::File),
                                    directory: snapshot_path,
                                    ..Default::default()
                                }
                            } else {
                                panic!(
                                    "--snapshot-path must be specified for --snapshot-bucket-type=file"
                                );
                            }
                        }
                    }
                };

                let latest_available_epoch =
                    latest.then_some(get_latest_available_epoch(&snapshot_store_config).await?);
                let epoch_to_download = epoch.or(latest_available_epoch).expect(
                    "Either pass epoch with --epoch <epoch_num> or use latest with --latest",
                );

                if let Err(e) =
                    check_completed_snapshot(&snapshot_store_config, epoch_to_download).await
                {
                    panic!("Aborting snapshot restore: {e}, snapshot may not be uploaded yet");
                }
                download_db_snapshot(
                    &path,
                    epoch_to_download,
                    snapshot_store_config,
                    skip_indexes,
                    num_parallel_downloads,
                )
                .await?;
            }
            ToolCommand::Replay {
                rpc_url,
                safety_checks,
                cmd,
                use_authority,
                cfg_path,
                chain,
            } => {
                execute_replay_command(rpc_url, safety_checks, use_authority, cfg_path, chain, cmd)
                    .await?;
            }
            ToolCommand::VerifyArchive {
                genesis,
                object_store_config,
                download_concurrency,
            } => {
                verify_archive(&genesis, object_store_config, download_concurrency, true).await?;
            }
            ToolCommand::PrintArchiveManifest {
                object_store_config,
            } => {
                println!("{}", read_manifest_as_json(object_store_config).await?);
            }
            ToolCommand::UpdateArchiveManifest {
                object_store_config,
                archive_json_path,
            } => {
                write_manifest_from_json(object_store_config, archive_json_path).await?;
            }
            ToolCommand::VerifyArchiveByChecksum {
                object_store_config,
                download_concurrency,
            } => {
                verify_archive_by_checksum(object_store_config, download_concurrency).await?;
            }
            ToolCommand::DumpArchiveByChecksum {
                object_store_config,
                start,
                end,
                max_content_length,
            } => {
                dump_checkpoints_from_archive(object_store_config, start, end, max_content_length)
                    .await?;
            }
            ToolCommand::SignTransaction {
                genesis,
                sender_signed_data,
            } => {
                let genesis = Genesis::load(genesis)?;
                let sender_signed_data = bcs::from_bytes::<SenderSignedData>(
                    &fastcrypto::encoding::Base64::decode(sender_signed_data.as_str()).unwrap(),
                )
                .unwrap();
                let transaction = Transaction::new(sender_signed_data);
                let (agg, _) =
                    AuthorityAggregatorBuilder::from_genesis(&genesis).build_network_clients();
                let result = agg.process_transaction(transaction, None).await;
                println!("{result:?}");
            }
        };
        Ok(())
    }
}