1use std::path::PathBuf;
6
7use diesel::{QueryableByName, connection::SimpleConnection, sql_types::BigInt};
8use iota_json_rpc_types::IotaTransactionBlockResponse;
9use iota_metrics::init_metrics;
10use tokio::task::JoinHandle;
11use tokio_util::sync::CancellationToken;
12
13use crate::{
14 IndexerMetrics,
15 config::{
16 IngestionConfig, IotaNamesOptions, PruningOptions, RetentionConfig, SnapshotLagConfig,
17 },
18 db::{ConnectionPool, ConnectionPoolConfig, PoolConnection, new_connection_pool},
19 errors::IndexerError,
20 indexer::Indexer,
21 store::{PgIndexerAnalyticalStore, PgIndexerStore},
22};
23
/// One-shot hook invoked with the freshly created store after the optional
/// database reset and before any indexer task is spawned (e.g. to seed data).
pub type DBInitHook = Box<dyn FnOnce(&PgIndexerStore) + Send>;
61
/// Selects which role a test indexer instance runs as.
pub enum IndexerTypeConfig {
    /// Serves JSON-RPC reads against an existing database.
    Reader {
        /// Address the reader's JSON-RPC server binds to.
        reader_mode_rpc_url: String,
    },
    /// Ingests checkpoint data and writes it to the database.
    Writer {
        snapshot_config: SnapshotLagConfig,
        /// `None` disables epoch-based retention/pruning.
        retention_config: Option<RetentionConfig>,
        /// `None` uses the default optimistic-pruner batch size.
        optimistic_pruner_batch_size: Option<u64>,
    },
    /// Runs the analytical worker against the same database.
    AnalyticalWorker,
}
73
74impl IndexerTypeConfig {
75 pub fn reader_mode(reader_mode_rpc_url: String) -> Self {
76 Self::Reader {
77 reader_mode_rpc_url,
78 }
79 }
80
81 pub fn writer_mode(
82 snapshot_config: Option<SnapshotLagConfig>,
83 pruning_options: Option<PruningOptions>,
84 ) -> Self {
85 Self::Writer {
86 snapshot_config: snapshot_config.unwrap_or_default(),
87 retention_config: pruning_options.as_ref().and_then(|pruning_options| {
88 pruning_options
89 .epochs_to_keep
90 .map(RetentionConfig::new_with_default_retention_only_for_testing)
91 }),
92 optimistic_pruner_batch_size: pruning_options
93 .and_then(|pruning_options| pruning_options.optimistic_pruner_batch_size),
94 }
95 }
96}
97
98pub async fn start_test_indexer(
99 db_url: String,
100 reset_db: bool,
101 db_init_hook: Option<DBInitHook>,
102 rpc_url: String,
103 reader_writer_config: IndexerTypeConfig,
104 data_ingestion_path: Option<PathBuf>,
105) -> (
106 PgIndexerStore,
107 JoinHandle<Result<(), IndexerError>>,
108 CancellationToken,
109) {
110 let token = CancellationToken::new();
111 let (store, handle) = start_test_indexer_impl(
112 db_url,
113 reset_db,
114 db_init_hook,
115 rpc_url,
116 reader_writer_config,
117 data_ingestion_path,
118 token.clone(),
119 )
120 .await;
121 (store, handle, token)
122}
123
124pub async fn start_test_indexer_impl(
127 db_url: String,
128 reset_db: bool,
129 db_init_hook: Option<DBInitHook>,
130 rpc_url: String,
131 reader_writer_config: IndexerTypeConfig,
132 data_ingestion_path: Option<PathBuf>,
133 cancel: CancellationToken,
134) -> (PgIndexerStore, JoinHandle<Result<(), IndexerError>>) {
135 let store = create_pg_store(&db_url, reset_db);
136 if reset_db {
137 crate::db::reset_database(&mut store.blocking_cp().get().unwrap()).unwrap();
138 }
139 if let Some(db_init_hook) = db_init_hook {
140 db_init_hook(&store);
141 }
142
143 let registry = prometheus::Registry::default();
144 init_metrics(®istry);
145 let indexer_metrics = IndexerMetrics::new(®istry);
146
147 let handle = match reader_writer_config {
148 IndexerTypeConfig::Reader {
149 reader_mode_rpc_url,
150 } => {
151 let config = crate::config::JsonRpcConfig {
152 iota_names_options: IotaNamesOptions::default(),
153 historic_fallback_options: crate::config::HistoricFallbackOptions {
154 fallback_kv_url: None,
155 },
156 rpc_address: reader_mode_rpc_url.parse().unwrap(),
157 rpc_client_url: rpc_url,
158 };
159 let pool = store.blocking_cp();
160 let store_clone = store.clone();
161 tokio::spawn(async move {
162 Indexer::start_reader(&config, store_clone, ®istry, pool, indexer_metrics).await
163 })
164 }
165 IndexerTypeConfig::Writer {
166 snapshot_config,
167 retention_config,
168 optimistic_pruner_batch_size,
169 } => {
170 let store_clone = store.clone();
171 let mut ingestion_config = IngestionConfig::default();
172 ingestion_config.sources.remote_store_url = data_ingestion_path
173 .is_none()
174 .then_some(format!("{rpc_url}/api/v1").parse().unwrap());
175 ingestion_config.sources.data_ingestion_path = data_ingestion_path;
176 ingestion_config.sources.rpc_client_url = Some(rpc_url.parse().unwrap());
177
178 tokio::spawn(async move {
179 Indexer::start_writer_with_config(
180 &ingestion_config,
181 store_clone,
182 indexer_metrics,
183 snapshot_config,
184 retention_config,
185 optimistic_pruner_batch_size,
186 cancel,
187 )
188 .await
189 })
190 }
191 IndexerTypeConfig::AnalyticalWorker => {
192 let store = PgIndexerAnalyticalStore::new(store.blocking_cp());
193
194 tokio::spawn(
195 async move { Indexer::start_analytical_worker(store, indexer_metrics).await },
196 )
197 }
198 };
199
200 (store, handle)
201}
202
/// Handle to a test database plus an admin connection (to the `postgres`
/// maintenance database on the same server) used to create/drop it.
pub struct TestDatabase {
    /// Full connection URL of the test database itself.
    pub url: String,
    /// Database name, i.e. the last path segment of `url`.
    db_name: String,
    /// Connection to the `postgres` maintenance database; CREATE/DROP
    /// statements cannot run against the database being managed.
    connection: PoolConnection,
    /// Pool settings reused when building pools for the test database.
    pool_config: ConnectionPoolConfig,
}
210
211impl TestDatabase {
212 pub fn new(db_url: String) -> Self {
213 let pool_config = ConnectionPoolConfig {
216 pool_size: 5,
217 ..Default::default()
218 };
219
220 let db_name = db_url.split('/').next_back().unwrap().into();
221 let (default_url, _) = replace_db_name(&db_url, "postgres");
222 let blocking_pool = new_connection_pool(&default_url, &pool_config).unwrap();
223 let connection = blocking_pool.get().unwrap();
224 Self {
225 url: db_url,
226 db_name,
227 connection,
228 pool_config,
229 }
230 }
231
232 pub fn drop_if_exists(&mut self) {
234 self.connection
235 .batch_execute(&format!("DROP DATABASE IF EXISTS {}", self.db_name))
236 .unwrap();
237 }
238
239 pub fn create(&mut self) {
241 self.connection
242 .batch_execute(&format!("CREATE DATABASE {}", self.db_name))
243 .unwrap();
244 }
245
246 pub fn recreate(&mut self) {
248 self.drop_if_exists();
249 self.create();
250 }
251
252 pub fn to_connection_pool(&self) -> ConnectionPool {
254 new_connection_pool(&self.url, &self.pool_config).unwrap()
255 }
256
257 pub fn reset_db(&mut self) {
258 crate::db::reset_database(&mut self.to_connection_pool().get().unwrap()).unwrap();
259 }
260}
261
262pub fn create_pg_store(db_url: &str, reset_database: bool) -> PgIndexerStore {
263 let registry = prometheus::Registry::default();
264 init_metrics(®istry);
265 let indexer_metrics = IndexerMetrics::new(®istry);
266
267 let mut test_db = TestDatabase::new(db_url.to_string());
268 if reset_database {
269 test_db.recreate();
270 }
271
272 PgIndexerStore::new(test_db.to_connection_pool(), indexer_metrics.clone())
273}
274
/// Replaces the database name (the part after the last `/`) in `db_url`
/// with `new_db_name`.
///
/// Returns the rewritten URL together with the database name that was
/// replaced.
///
/// # Panics
///
/// Panics if `db_url` contains no `/`.
fn replace_db_name(db_url: &str, new_db_name: &str) -> (String, String) {
    // `rsplit_once` splits at the last '/', separating the base URL from
    // the database name in one step (no manual index arithmetic).
    let (base, old_db_name) = db_url
        .rsplit_once('/')
        .expect("unable to find / in db_url");

    (format!("{base}/{new_db_name}"), old_db_name.to_string())
}
284
285pub async fn force_delete_database(db_url: String) {
286 let (default_db_url, db_name) = replace_db_name(&db_url, "postgres");
291 let mut pool_config = ConnectionPoolConfig::default();
292 pool_config.set_pool_size(1);
293
294 let blocking_pool = new_connection_pool(&default_db_url, &pool_config).unwrap();
295 blocking_pool
296 .get()
297 .unwrap()
298 .batch_execute(&format!("DROP DATABASE IF EXISTS {db_name} WITH (FORCE)"))
299 .unwrap();
300}
301
/// Builds an [`IotaTransactionBlockResponse`] that contains only fields
/// explicitly selected via the `with_*` methods, copied from a full
/// response.
#[derive(Clone)]
pub struct IotaTransactionBlockResponseBuilder<'a> {
    /// Accumulates the fields selected so far; starts out default/empty.
    response: IotaTransactionBlockResponse,
    /// Source response that selected fields are copied from.
    full_response: &'a IotaTransactionBlockResponse,
}
307
308impl<'a> IotaTransactionBlockResponseBuilder<'a> {
309 pub fn new(full_response: &'a IotaTransactionBlockResponse) -> Self {
310 Self {
311 response: IotaTransactionBlockResponse::default(),
312 full_response,
313 }
314 }
315
316 pub fn with_input(mut self) -> Self {
317 self.response = IotaTransactionBlockResponse {
318 transaction: self.full_response.transaction.clone(),
319 ..self.response
320 };
321 self
322 }
323
324 pub fn with_raw_input(mut self) -> Self {
325 self.response = IotaTransactionBlockResponse {
326 raw_transaction: self.full_response.raw_transaction.clone(),
327 ..self.response
328 };
329 self
330 }
331
332 pub fn with_effects(mut self) -> Self {
333 self.response = IotaTransactionBlockResponse {
334 effects: self.full_response.effects.clone(),
335 ..self.response
336 };
337 self
338 }
339
340 pub fn with_events(mut self) -> Self {
341 self.response = IotaTransactionBlockResponse {
342 events: self.full_response.events.clone(),
343 ..self.response
344 };
345 self
346 }
347
348 pub fn with_balance_changes(mut self) -> Self {
349 self.response = IotaTransactionBlockResponse {
350 balance_changes: self.full_response.balance_changes.clone(),
351 ..self.response
352 };
353 self
354 }
355
356 pub fn with_object_changes(mut self) -> Self {
357 self.response = IotaTransactionBlockResponse {
358 object_changes: self.full_response.object_changes.clone(),
359 ..self.response
360 };
361 self
362 }
363
364 pub fn with_input_and_changes(mut self) -> Self {
365 self.response = IotaTransactionBlockResponse {
366 transaction: self.full_response.transaction.clone(),
367 balance_changes: self.full_response.balance_changes.clone(),
368 object_changes: self.full_response.object_changes.clone(),
369 ..self.response
370 };
371 self
372 }
373
374 pub fn build(self) -> IotaTransactionBlockResponse {
375 IotaTransactionBlockResponse {
376 transaction: self.response.transaction,
377 raw_transaction: self.response.raw_transaction,
378 effects: self.response.effects,
379 events: self.response.events,
380 balance_changes: self.response.balance_changes,
381 object_changes: self.response.object_changes,
382 ..self.full_response.clone()
384 }
385 }
386}
387
/// Connection URL for database `db_name` on the local test Postgres
/// instance (default test credentials, port 5432).
pub fn db_url(db_name: &str) -> String {
    let base = "postgres://postgres:postgrespw@localhost:5432";
    format!("{base}/{db_name}")
}
394
/// Result row for raw SQL queries that return a single bigint column
/// aliased `cnt` (e.g. `SELECT COUNT(*) AS cnt ...`).
#[derive(QueryableByName, Debug)]
pub struct RowCount {
    #[diesel(sql_type = BigInt)]
    pub cnt: i64,
}