use std::{collections::BTreeMap, env, path::PathBuf, sync::Arc};

use anyhow::Result;
use clap::*;
use fastcrypto::encoding::Encoding;
use futures::{StreamExt, future::join_all};
use iota_archival::{read_manifest_as_json, write_manifest_from_json};
use iota_config::{
    Config,
    genesis::Genesis,
    object_storage_config::{ObjectStoreConfig, ObjectStoreType},
};
use iota_core::{authority_aggregator::AuthorityAggregatorBuilder, authority_client::AuthorityAPI};
use iota_protocol_config::Chain;
use iota_replay::{ReplayToolCommand, execute_replay_command};
use iota_sdk::{IotaClient, IotaClientBuilder, rpc_types::IotaTransactionBlockResponseOptions};
use iota_types::{
    base_types::*,
    crypto::AuthorityPublicKeyBytes,
    messages_checkpoint::{CheckpointRequest, CheckpointResponse, CheckpointSequenceNumber},
    messages_grpc::TransactionInfoRequest,
    transaction::{SenderSignedData, Transaction},
};
use telemetry_subscribers::TracingHandle;

use crate::{
    ConciseObjectOutput, GroupedObjectOutput, SnapshotVerifyMode, VerboseObjectOutput,
    check_completed_snapshot,
    db_tool::{DbToolCommand, execute_db_tool_command, print_db_all_tables},
    download_db_snapshot, download_formal_snapshot, dump_checkpoints_from_archive,
    get_latest_available_epoch, get_object, get_transaction_block, make_clients,
    restore_from_db_checkpoint, verify_archive, verify_archive_by_checksum,
};

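/// Output verbosity for `fetch-object`: `Grouped` prints a `GroupedObjectOutput`
/// summary of the per-validator responses, `Concise` prints one
/// `ConciseObjectOutput` row (optionally preceded by a header), and `Verbose`
/// prints the full `VerboseObjectOutput`.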
#[derive(Parser, Clone, ValueEnum)]
pub enum Verbosity {
    Grouped,
    Concise,
    Verbose,
}

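/// The subcommands exposed by this debugging/operations CLI.
///
/// Illustrative invocation (a sketch; the binary name is assumed to be
/// `iota-tool` and may differ in your build):
///
/// ```text
/// iota-tool fetch-object --id <OBJECT_ID> --fullnode-rpc-url <FULLNODE_URL>
/// ```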
#[derive(Parser)]
pub enum ToolCommand {
    LockedObject {
        #[arg(long, help = "The object ID to fetch")]
        id: Option<ObjectID>,
        #[arg(long)]
        address: Option<IotaAddress>,
        #[arg(long)]
        fullnode_rpc_url: String,
        #[arg(long)]
        rescue: bool,
    },

    FetchObject {
        #[arg(long, help = "The object ID to fetch")]
        id: ObjectID,

        #[arg(long, help = "Fetch object at a specific sequence")]
        version: Option<u64>,

        #[arg(
            long,
            help = "Validator to fetch from - if not specified, all validators are queried"
        )]
        validator: Option<AuthorityName>,

        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(value_enum, long, default_value = "grouped", ignore_case = true)]
        verbosity: Verbosity,

        #[arg(long, help = "Don't show the header in concise output")]
        concise_no_header: bool,
    },

    FetchTransaction {
        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(long, help = "The transaction ID to fetch")]
        digest: TransactionDigest,

        #[arg(long = "show-tx")]
        show_input_tx: bool,
    },

    DbTool {
        #[arg(long)]
        db_path: String,
        #[command(subcommand)]
        cmd: Option<DbToolCommand>,
    },

    VerifyArchive {
        #[arg(long)]
        genesis: PathBuf,
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 5)]
        download_concurrency: usize,
    },

    PrintArchiveManifest {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
    },
    UpdateArchiveManifest {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(long = "archive-path")]
        archive_json_path: PathBuf,
    },
    #[command(name = "verify-archive-from-checksums")]
    VerifyArchiveByChecksum {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 5)]
        download_concurrency: usize,
    },

    #[command(name = "dump-archive")]
    DumpArchiveByChecksum {
        #[command(flatten)]
        object_store_config: ObjectStoreConfig,
        #[arg(default_value_t = 0)]
        start: u64,
        end: u64,
        #[arg(default_value_t = 80)]
        max_content_length: usize,
    },

    DumpPackages {
        #[arg(long, short)]
        rpc_url: String,

        #[arg(long, short)]
        output_dir: PathBuf,

        #[arg(long)]
        before_checkpoint: Option<u64>,

        #[arg(short, long)]
        verbose: bool,
    },

    DumpValidators {
        #[arg(long)]
        genesis: PathBuf,

        #[arg(
            long,
            help = "Show concise output: name, authority key, and network address"
        )]
        concise: bool,
    },

    DumpGenesis {
        #[arg(long)]
        genesis: PathBuf,
    },

    FetchCheckpoint {
        #[arg(long)]
        fullnode_rpc_url: String,

        #[arg(long, help = "Fetch checkpoint at a specific sequence number")]
        sequence_number: Option<CheckpointSequenceNumber>,
    },

    Anemo {
        #[command(next_help_heading = "foo", flatten)]
        args: anemo_cli::Args,
    },

    #[command(name = "restore-db")]
    RestoreFromDBCheckpoint {
        #[arg(long)]
        config_path: PathBuf,
        #[arg(long)]
        db_checkpoint_path: PathBuf,
    },

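    /// Illustrative invocation (a sketch; the binary name is assumed to be
    /// `iota-tool` and bucket defaults are derived from `--network`):
    ///
    /// ```text
    /// iota-tool download-db-snapshot --latest --network testnet \
    ///     --path /tmp/db-snapshot --no-sign-request
    /// ```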
    #[command(
        name = "download-db-snapshot",
        about = "Downloads the legacy database snapshot via cloud object store, outputs to local disk"
    )]
    DownloadDBSnapshot {
        #[arg(long, conflicts_with = "latest")]
        epoch: Option<u64>,
        #[arg(long, help = "The path to write the downloaded snapshot files")]
        path: PathBuf,
        #[arg(long)]
        skip_indexes: bool,
        #[arg(long)]
        num_parallel_downloads: Option<usize>,
        #[arg(long, default_value = "mainnet")]
        network: Chain,
        #[arg(long, conflicts_with = "no_sign_request")]
        snapshot_bucket: Option<String>,
        #[arg(
            long,
            conflicts_with = "no_sign_request",
            help = "Required if --no-sign-request is not set"
        )]
        snapshot_bucket_type: Option<ObjectStoreType>,
        #[arg(long, help = "Only used for testing, when --snapshot-bucket-type=FILE")]
        snapshot_path: Option<PathBuf>,
        #[arg(
            long,
            conflicts_with_all = &["snapshot_bucket", "snapshot_bucket_type"],
            help = "If set, no authentication is needed for snapshot restore"
        )]
        no_sign_request: bool,
        #[arg(
            long,
            conflicts_with = "epoch",
            help = "Defaults to the latest available snapshot in the chosen bucket"
        )]
        latest: bool,
        #[arg(long)]
        verbose: bool,
    },

    #[command(
        about = "Downloads formal database snapshot via cloud object store, outputs to local disk"
    )]
    DownloadFormalSnapshot {
        #[arg(long, conflicts_with = "latest")]
        epoch: Option<u64>,
        #[arg(long)]
        genesis: PathBuf,
        #[arg(long)]
        path: PathBuf,
        #[arg(long)]
        num_parallel_downloads: Option<usize>,
        #[arg(long, default_value = "normal")]
        verify: Option<SnapshotVerifyMode>,
        #[arg(long, default_value = "mainnet")]
        network: Chain,
        #[arg(long, conflicts_with = "no_sign_request")]
        snapshot_bucket: Option<String>,
        #[arg(
            long,
            conflicts_with = "no_sign_request",
            help = "Required if --no-sign-request is not set"
        )]
        snapshot_bucket_type: Option<ObjectStoreType>,
        #[arg(long)]
        snapshot_path: Option<PathBuf>,
        #[arg(
            long,
            conflicts_with_all = &["snapshot_bucket", "snapshot_bucket_type"],
            help = "If set, no authentication is needed for snapshot restore"
        )]
        no_sign_request: bool,
        #[arg(
            long,
            conflicts_with = "epoch",
            help = "Defaults to the latest available snapshot in the chosen bucket"
        )]
        latest: bool,
        #[arg(long)]
        verbose: bool,

        #[arg(long)]
        all_checkpoints: bool,
    },

    Replay {
        #[arg(long = "rpc")]
        rpc_url: Option<String>,
        #[arg(long)]
        safety_checks: bool,
        #[arg(long = "authority")]
        use_authority: bool,
        #[arg(
            long,
            short,
            help = "Path to the network config file. This should be specified when rpc_url is not present. \
                    If not specified, we will use the default network config file at ~/.iota-replay/network-config.yaml"
        )]
        cfg_path: Option<PathBuf>,
        #[arg(
            long,
            help = "The name of the chain to replay from, could be one of: mainnet, testnet, devnet. \
                    When rpc_url is not specified, this is used to load the corresponding config from the network config file. \
                    If not specified, mainnet will be used by default"
        )]
        chain: Option<String>,
        #[command(subcommand)]
        cmd: ReplayToolCommand,
    },

    SignTransaction {
        #[arg(long)]
        genesis: PathBuf,

        #[arg(
            long,
            help = "The Base64 encoding of the BCS bytes of SenderSignedData"
        )]
        sender_signed_data: String,
    },
}

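/// Checks whether object `id` is locked by a transaction on a majority of
/// validators and, when `rescue` is true, tries to release it by re-submitting
/// the top-locking transaction through the fullnode's quorum driver. The tuple
/// indices used below (`.2` for the transaction digest, `.4` for the lock)
/// follow the assumed layout of `GroupedObjectOutput::voting_power` entries.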
async fn check_locked_object(
    iota_client: &Arc<IotaClient>,
    committee: Arc<BTreeMap<AuthorityPublicKeyBytes, u64>>,
    id: ObjectID,
    rescue: bool,
) -> anyhow::Result<()> {
    let clients = Arc::new(make_clients(iota_client).await?);
    let output = get_object(id, None, None, clients.clone()).await?;
    let output = GroupedObjectOutput::new(output, committee);
    if output.fully_locked {
        println!("Object {} is fully locked.", id);
        return Ok(());
    }
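    // `voting_power` is assumed to be sorted with the highest-stake entry first;
    // `.1` is the stake backing that entry and `.0` the grouped object record.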
    let top_record = output.voting_power.first().unwrap();
    let top_record_stake = top_record.1;
    let top_record = top_record.0.unwrap();
    if top_record.4.is_none() {
        println!(
            "Object {} does not seem to be locked by a majority of validators (unlocked stake: {})",
            id, top_record_stake
        );
        return Ok(());
    }

    let tx_digest = top_record.2;
    if !rescue {
        println!("Object {} is rescuable, top tx: {:?}", id, tx_digest);
        return Ok(());
    }
    println!("Object {} is rescuable, trying tx {}", id, tx_digest);
    let validator = output
        .grouped_results
        .get(&Some(top_record))
        .unwrap()
        .first()
        .unwrap();
    let client = &clients.get(validator).unwrap().1;
    let tx = client
        .handle_transaction_info_request(TransactionInfoRequest {
            transaction_digest: tx_digest,
        })
        .await?
        .transaction;
    let res = iota_client
        .quorum_driver_api()
        .execute_transaction_block(
            Transaction::new(tx),
            IotaTransactionBlockResponseOptions::full_content(),
            None,
        )
        .await;
    match res {
        Ok(_) => {
            println!("Transaction executed successfully ({:?})", tx_digest);
        }
        Err(e) => {
            println!("Failed to execute transaction ({:?}): {:?}", tx_digest, e);
        }
    }
    Ok(())
}

impl ToolCommand {
    pub async fn execute(self, tracing_handle: TracingHandle) -> Result<(), anyhow::Error> {
        match self {
            ToolCommand::LockedObject {
                id,
                fullnode_rpc_url,
                rescue,
                address,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let committee = Arc::new(
                    iota_client
                        .governance_api()
                        .get_committee_info(None)
                        .await?
                        .validators
                        .into_iter()
                        .collect::<BTreeMap<_, _>>(),
                );
                let object_ids = match id {
                    Some(id) => vec![id],
                    None => {
                        let address = address.expect("Either id or address must be provided");
                        iota_client
                            .coin_read_api()
                            .get_coins_stream(address, None)
                            .map(|c| c.coin_object_id)
                            .collect()
                            .await
                    }
                };
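                // Check the candidate objects in batches of 30 so that at most 30
                // object checks are in flight at a time.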
                for ids in object_ids.chunks(30) {
                    let mut tasks = vec![];
                    for id in ids {
                        tasks.push(check_locked_object(
                            &iota_client,
                            committee.clone(),
                            *id,
                            rescue,
                        ))
                    }
                    join_all(tasks)
                        .await
                        .into_iter()
                        .collect::<Result<Vec<_>, _>>()?;
                }
            }
            ToolCommand::FetchObject {
                id,
                validator,
                version,
                fullnode_rpc_url,
                verbosity,
                concise_no_header,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let clients = Arc::new(make_clients(&iota_client).await?);
                let output = get_object(id, version, validator, clients).await?;

                match verbosity {
                    Verbosity::Grouped => {
                        let committee = Arc::new(
                            iota_client
                                .governance_api()
                                .get_committee_info(None)
                                .await?
                                .validators
                                .into_iter()
                                .collect::<BTreeMap<_, _>>(),
                        );
                        println!("{}", GroupedObjectOutput::new(output, committee));
                    }
                    Verbosity::Verbose => {
                        println!("{}", VerboseObjectOutput(output));
                    }
                    Verbosity::Concise => {
                        if !concise_no_header {
                            println!("{}", ConciseObjectOutput::header());
                        }
                        println!("{}", ConciseObjectOutput(output));
                    }
                }
            }
            ToolCommand::FetchTransaction {
                digest,
                show_input_tx,
                fullnode_rpc_url,
            } => {
                print!(
                    "{}",
                    get_transaction_block(digest, show_input_tx, fullnode_rpc_url).await?
                );
            }
            ToolCommand::DbTool { db_path, cmd } => {
                let path = PathBuf::from(db_path);
                match cmd {
                    Some(c) => execute_db_tool_command(path, c).await?,
                    None => print_db_all_tables(path)?,
                }
            }
            ToolCommand::DumpPackages {
                rpc_url,
                output_dir,
                before_checkpoint,
                verbose,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }

                iota_package_dump::dump(rpc_url, output_dir, before_checkpoint).await?;
            }
            ToolCommand::DumpValidators { genesis, concise } => {
                let genesis = Genesis::load(genesis).unwrap();
                if !concise {
                    println!("{:#?}", genesis.validator_set_for_tooling());
                } else {
                    for (i, val_info) in genesis.validator_set_for_tooling().iter().enumerate() {
                        let metadata = val_info.verified_metadata();
                        println!(
                            "#{:<2} {:<20} {:?} {:?} {}",
                            i,
                            metadata.name,
                            metadata.iota_pubkey_bytes().concise(),
                            metadata.net_address,
                            anemo::PeerId(metadata.network_pubkey.0.to_bytes()),
                        )
                    }
                }
            }
            ToolCommand::DumpGenesis { genesis } => {
                let genesis = Genesis::load(genesis)?;
                println!("{:#?}", genesis);
            }
            ToolCommand::FetchCheckpoint {
                sequence_number,
                fullnode_rpc_url,
            } => {
                let iota_client =
                    Arc::new(IotaClientBuilder::default().build(fullnode_rpc_url).await?);
                let clients = make_clients(&iota_client).await?;

                for (name, (_, client)) in clients {
                    let resp = client
                        .handle_checkpoint(CheckpointRequest {
                            sequence_number,
                            request_content: true,
                            certified: true,
                        })
                        .await
                        .unwrap();
                    let CheckpointResponse {
                        checkpoint,
                        contents,
                    } = resp;
                    println!("Validator: {:?}\n", name.concise());
                    println!("Checkpoint: {:?}\n", checkpoint);
                    println!("Content: {:?}\n", contents);
                }
            }
            ToolCommand::Anemo { args } => {
                let config = crate::make_anemo_config();
                anemo_cli::run(config, args).await
            }
            ToolCommand::RestoreFromDBCheckpoint {
                config_path,
                db_checkpoint_path,
            } => {
                let config = iota_config::NodeConfig::load(config_path)?;
                restore_from_db_checkpoint(&config, &db_checkpoint_path).await?;
            }
            ToolCommand::DownloadFormalSnapshot {
                epoch,
                genesis,
                path,
                num_parallel_downloads,
                verify,
                network,
                snapshot_bucket,
                snapshot_bucket_type,
                snapshot_path,
                no_sign_request,
                latest,
                verbose,
                all_checkpoints,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }
                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
                    num_cpus::get()
                        .checked_sub(1)
                        .expect("Failed to get number of CPUs")
                });
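                // Bucket resolution precedence: an explicit --snapshot-bucket wins, then
                // the per-network environment variable, then the built-in default for the
                // signed mainnet/testnet buckets.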
                let snapshot_bucket =
                    snapshot_bucket.or_else(|| match (network, no_sign_request) {
                        (Chain::Mainnet, false) => Some(
                            env::var("MAINNET_FORMAL_SIGNED_BUCKET")
                                .unwrap_or("iota-mainnet-formal".to_string()),
                        ),
                        (Chain::Mainnet, true) => env::var("MAINNET_FORMAL_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, true) => env::var("TESTNET_FORMAL_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, _) => Some(
                            env::var("TESTNET_FORMAL_SIGNED_BUCKET")
                                .unwrap_or("iota-testnet-formal".to_string()),
                        ),
                        (Chain::Unknown, _) => {
                            panic!("Cannot generate default snapshot bucket for unknown network");
                        }
                    });

                let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok().or_else(|| {
                    if no_sign_request {
                        if network == Chain::Mainnet {
                            Some("https://formal-snapshot.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://formal-snapshot.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    } else {
                        None
                    }
                });

                let snapshot_bucket_type = if no_sign_request {
                    ObjectStoreType::S3
                } else {
                    snapshot_bucket_type
                        .expect("You must set either --snapshot-bucket-type or --no-sign-request")
                };
                let snapshot_store_config = match snapshot_bucket_type {
                    ObjectStoreType::S3 => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        bucket: snapshot_bucket.filter(|s| !s.is_empty()),
                        aws_access_key_id: env::var("AWS_SNAPSHOT_ACCESS_KEY_ID").ok(),
                        aws_secret_access_key: env::var("AWS_SNAPSHOT_SECRET_ACCESS_KEY").ok(),
                        aws_region: env::var("AWS_SNAPSHOT_REGION").ok(),
                        aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                        aws_virtual_hosted_style_request: env::var(
                            "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                        )
                        .ok()
                        .and_then(|b| b.parse().ok())
                        .unwrap_or(no_sign_request),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::GCS => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::GCS),
                        bucket: snapshot_bucket,
                        google_service_account: env::var("GCS_SNAPSHOT_SERVICE_ACCOUNT_FILE_PATH")
                            .ok(),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::Azure => ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::Azure),
                        bucket: snapshot_bucket,
                        azure_storage_account: env::var("AZURE_SNAPSHOT_STORAGE_ACCOUNT").ok(),
                        azure_storage_access_key: env::var("AZURE_SNAPSHOT_STORAGE_ACCESS_KEY")
                            .ok(),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    },
                    ObjectStoreType::File => {
                        if snapshot_path.is_some() {
                            ObjectStoreConfig {
                                object_store: Some(ObjectStoreType::File),
                                directory: snapshot_path,
                                ..Default::default()
                            }
                        } else {
                            panic!(
                                "--snapshot-path must be specified for --snapshot-bucket-type=file"
                            );
                        }
                    }
                };

                let archive_bucket = Some(
                    env::var("FORMAL_SNAPSHOT_ARCHIVE_BUCKET").unwrap_or_else(|_| match network {
                        Chain::Mainnet => "iota-mainnet-archive".to_string(),
                        Chain::Testnet => "iota-testnet-archive".to_string(),
                        Chain::Unknown => {
                            panic!("Cannot generate default archive bucket for unknown network");
                        }
                    }),
                );

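                // The archive store defaults to the public IOTA archive bucket for the
                // selected network; setting CUSTOM_ARCHIVE_BUCKET=true (together with
                // the FORMAL_SNAPSHOT_ARCHIVE_* variables and matching credentials)
                // points the restore at a custom archive instead.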
                let mut custom_archive_enabled = false;
                if let Ok(custom_archive_check) = env::var("CUSTOM_ARCHIVE_BUCKET") {
                    if custom_archive_check == "true" {
                        custom_archive_enabled = true;
                    }
                }
                let archive_store_config = if custom_archive_enabled {
                    let aws_region = Some(
                        env::var("FORMAL_SNAPSHOT_ARCHIVE_REGION")
                            .unwrap_or("us-west-2".to_string()),
                    );

                    let archive_bucket_type = env::var("FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE")
                        .expect(
                            "If setting `CUSTOM_ARCHIVE_BUCKET=true`, you must also set FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE and credentials",
                        );
                    match archive_bucket_type.to_ascii_lowercase().as_str() {
                        "s3" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::S3),
                            bucket: archive_bucket.filter(|s| !s.is_empty()),
                            aws_access_key_id: env::var("AWS_ARCHIVE_ACCESS_KEY_ID").ok(),
                            aws_secret_access_key: env::var("AWS_ARCHIVE_SECRET_ACCESS_KEY").ok(),
                            aws_region,
                            aws_endpoint: env::var("AWS_ARCHIVE_ENDPOINT").ok(),
                            aws_virtual_hosted_style_request: env::var(
                                "AWS_ARCHIVE_VIRTUAL_HOSTED_REQUESTS",
                            )
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(false),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        "gcs" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::GCS),
                            bucket: archive_bucket,
                            google_service_account: env::var(
                                "GCS_ARCHIVE_SERVICE_ACCOUNT_FILE_PATH",
                            )
                            .ok(),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        "azure" => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::Azure),
                            bucket: archive_bucket,
                            azure_storage_account: env::var("AZURE_ARCHIVE_STORAGE_ACCOUNT").ok(),
                            azure_storage_access_key: env::var("AZURE_ARCHIVE_STORAGE_ACCESS_KEY")
                                .ok(),
                            object_store_connection_limit: 50,
                            no_sign_request: false,
                            ..Default::default()
                        },
                        _ => panic!(
                            "If setting `CUSTOM_ARCHIVE_BUCKET=true`, FORMAL_SNAPSHOT_ARCHIVE_BUCKET_TYPE must be one of 's3', 'gcs', or 'azure'"
                        ),
                    }
                } else {
                    let aws_endpoint = env::var("AWS_ARCHIVE_ENDPOINT").ok().or_else(|| {
                        if network == Chain::Mainnet {
                            Some("https://archive.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://archive.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    });

                    let aws_virtual_hosted_style_request =
                        env::var("AWS_ARCHIVE_VIRTUAL_HOSTED_REQUESTS")
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(matches!(network, Chain::Mainnet | Chain::Testnet));

                    ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        bucket: archive_bucket.filter(|s| !s.is_empty()),
                        aws_region: Some("us-west-2".to_string()),
                        aws_endpoint,
                        aws_virtual_hosted_style_request,
                        object_store_connection_limit: 200,
                        no_sign_request: true,
                        ..Default::default()
                    }
                };
                let latest_available_epoch =
                    latest.then_some(get_latest_available_epoch(&snapshot_store_config).await?);
                let epoch_to_download = epoch.or(latest_available_epoch).expect(
                    "Either pass epoch with --epoch <epoch_num> or use latest with --latest",
                );

                if let Err(e) =
                    check_completed_snapshot(&snapshot_store_config, epoch_to_download).await
                {
                    panic!(
                        "Aborting snapshot restore: {}, snapshot may not be uploaded yet",
                        e
                    );
                }

                let verify = verify.unwrap_or_default();
                download_formal_snapshot(
                    &path,
                    epoch_to_download,
                    &genesis,
                    snapshot_store_config,
                    archive_store_config,
                    num_parallel_downloads,
                    network,
                    verify,
                    all_checkpoints,
                )
                .await?;
            }
            ToolCommand::DownloadDBSnapshot {
                epoch,
                path,
                skip_indexes,
                num_parallel_downloads,
                network,
                snapshot_bucket,
                snapshot_bucket_type,
                snapshot_path,
                no_sign_request,
                latest,
                verbose,
            } => {
                if !verbose {
                    tracing_handle
                        .update_log("off")
                        .expect("Failed to update log level");
                }
                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
                    num_cpus::get()
                        .checked_sub(1)
                        .expect("Failed to get number of CPUs")
                });
                let snapshot_bucket =
                    snapshot_bucket.or_else(|| match (network, no_sign_request) {
                        (Chain::Mainnet, false) => Some(
                            env::var("MAINNET_DB_SIGNED_BUCKET")
                                .unwrap_or("iota-mainnet-snapshots".to_string()),
                        ),
                        (Chain::Mainnet, true) => env::var("MAINNET_DB_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, true) => env::var("TESTNET_DB_UNSIGNED_BUCKET").ok(),
                        (Chain::Testnet, _) => Some(
                            env::var("TESTNET_DB_SIGNED_BUCKET")
                                .unwrap_or("iota-testnet-snapshots".to_string()),
                        ),
                        (Chain::Unknown, _) => {
                            panic!("Cannot generate default snapshot bucket for unknown network");
                        }
                    });

                let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok();
                let snapshot_bucket_type = if no_sign_request {
                    ObjectStoreType::S3
                } else {
                    snapshot_bucket_type
                        .expect("You must set either --snapshot-bucket-type or --no-sign-request")
                };
                let snapshot_store_config = if no_sign_request {
                    let aws_endpoint = env::var("AWS_SNAPSHOT_ENDPOINT").ok().or_else(|| {
                        if network == Chain::Mainnet {
                            Some("https://rocksdb-snapshot.mainnet.iota.cafe".to_string())
                        } else if network == Chain::Testnet {
                            Some("https://rocksdb-snapshot.testnet.iota.cafe".to_string())
                        } else {
                            None
                        }
                    });
                    ObjectStoreConfig {
                        object_store: Some(ObjectStoreType::S3),
                        aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                        aws_virtual_hosted_style_request: env::var(
                            "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                        )
                        .ok()
                        .and_then(|b| b.parse().ok())
                        .unwrap_or(no_sign_request),
                        object_store_connection_limit: 200,
                        no_sign_request,
                        ..Default::default()
                    }
                } else {
                    match snapshot_bucket_type {
                        ObjectStoreType::S3 => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::S3),
                            bucket: snapshot_bucket.filter(|s| !s.is_empty()),
                            aws_access_key_id: env::var("AWS_SNAPSHOT_ACCESS_KEY_ID").ok(),
                            aws_secret_access_key: env::var("AWS_SNAPSHOT_SECRET_ACCESS_KEY").ok(),
                            aws_region: env::var("AWS_SNAPSHOT_REGION").ok(),
                            aws_endpoint: aws_endpoint.filter(|s| !s.is_empty()),
                            aws_virtual_hosted_style_request: env::var(
                                "AWS_SNAPSHOT_VIRTUAL_HOSTED_REQUESTS",
                            )
                            .ok()
                            .and_then(|b| b.parse().ok())
                            .unwrap_or(no_sign_request),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::GCS => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::GCS),
                            bucket: snapshot_bucket,
                            google_service_account: env::var(
                                "GCS_SNAPSHOT_SERVICE_ACCOUNT_FILE_PATH",
                            )
                            .ok(),
                            google_project_id: env::var("GCS_SNAPSHOT_SERVICE_ACCOUNT_PROJECT_ID")
                                .ok(),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::Azure => ObjectStoreConfig {
                            object_store: Some(ObjectStoreType::Azure),
                            bucket: snapshot_bucket,
                            azure_storage_account: env::var("AZURE_SNAPSHOT_STORAGE_ACCOUNT").ok(),
                            azure_storage_access_key: env::var("AZURE_SNAPSHOT_STORAGE_ACCESS_KEY")
                                .ok(),
                            object_store_connection_limit: 200,
                            no_sign_request,
                            ..Default::default()
                        },
                        ObjectStoreType::File => {
                            if snapshot_path.is_some() {
                                ObjectStoreConfig {
                                    object_store: Some(ObjectStoreType::File),
                                    directory: snapshot_path,
                                    ..Default::default()
                                }
                            } else {
                                panic!(
                                    "--snapshot-path must be specified for --snapshot-bucket-type=file"
                                );
                            }
                        }
                    }
                };

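                // Resolve the epoch to download: --latest queries the snapshot store
                // for the most recent epoch, otherwise --epoch must be provided. Abort
                // early if that epoch's snapshot upload has not completed yet.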
                let latest_available_epoch =
                    latest.then_some(get_latest_available_epoch(&snapshot_store_config).await?);
                let epoch_to_download = epoch.or(latest_available_epoch).expect(
                    "Either pass epoch with --epoch <epoch_num> or use latest with --latest",
                );

                if let Err(e) =
                    check_completed_snapshot(&snapshot_store_config, epoch_to_download).await
                {
                    panic!(
                        "Aborting snapshot restore: {}, snapshot may not be uploaded yet",
                        e
                    );
                }
                download_db_snapshot(
                    &path,
                    epoch_to_download,
                    snapshot_store_config,
                    skip_indexes,
                    num_parallel_downloads,
                )
                .await?;
            }
            ToolCommand::Replay {
                rpc_url,
                safety_checks,
                cmd,
                use_authority,
                cfg_path,
                chain,
            } => {
                execute_replay_command(rpc_url, safety_checks, use_authority, cfg_path, chain, cmd)
                    .await?;
            }
            ToolCommand::VerifyArchive {
                genesis,
                object_store_config,
                download_concurrency,
            } => {
                verify_archive(&genesis, object_store_config, download_concurrency, true).await?;
            }
            ToolCommand::PrintArchiveManifest {
                object_store_config,
            } => {
                println!("{}", read_manifest_as_json(object_store_config).await?);
            }
            ToolCommand::UpdateArchiveManifest {
                object_store_config,
                archive_json_path,
            } => {
                write_manifest_from_json(object_store_config, archive_json_path).await?;
            }
            ToolCommand::VerifyArchiveByChecksum {
                object_store_config,
                download_concurrency,
            } => {
                verify_archive_by_checksum(object_store_config, download_concurrency).await?;
            }
            ToolCommand::DumpArchiveByChecksum {
                object_store_config,
                start,
                end,
                max_content_length,
            } => {
                dump_checkpoints_from_archive(object_store_config, start, end, max_content_length)
                    .await?;
            }
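            // Decodes the Base64-encoded BCS bytes of a `SenderSignedData`, wraps it
            // in a `Transaction`, and submits it to the committee derived from the
            // provided genesis via the authority aggregator, printing the raw result.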
            ToolCommand::SignTransaction {
                genesis,
                sender_signed_data,
            } => {
                let genesis = Genesis::load(genesis)?;
                let sender_signed_data = bcs::from_bytes::<SenderSignedData>(
                    &fastcrypto::encoding::Base64::decode(sender_signed_data.as_str()).unwrap(),
                )
                .unwrap();
                let transaction = Transaction::new(sender_signed_data);
                let (agg, _) =
                    AuthorityAggregatorBuilder::from_genesis(&genesis).build_network_clients();
                let result = agg.process_transaction(transaction, None).await;
                println!("{:?}", result);
            }
        };
        Ok(())
    }
}