typed_store/test_db.rs

// Copyright (c) Mysten Labs, Inc.
// Modifications Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

#![allow(clippy::await_holding_lock)]

use std::{
    borrow::Borrow,
    collections::{BTreeMap, HashMap, VecDeque, btree_map::Iter},
    marker::PhantomData,
    ops::RangeBounds,
    sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard},
};

use bincode::Options;
use collectable::TryExtend;
use ouroboros::self_referencing;
use rand::distributions::{Alphanumeric, DistString};
use rocksdb::Direction;
use serde::{Serialize, de::DeserializeOwned};

use crate::{
    Map, TypedStoreError,
    rocks::{be_fix_int_ser, errors::typed_store_err_from_bcs_err},
};

/// An interface to a btree map backed sally database. This is mainly intended
/// for tests and performing benchmark comparisons
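///
/// A minimal usage sketch (illustrative only; assumes the crate is imported as
/// `typed_store` and mirrors the unit tests at the bottom of this file):
///
/// ```ignore
/// use typed_store::{Map, test_db::TestDB};
///
/// let db: TestDB<i32, String> = TestDB::open();
/// db.insert(&1, &"one".to_string()).unwrap();
/// assert_eq!(db.get(&1).unwrap(), Some("one".to_string()));
/// assert!(db.contains_key(&1).unwrap());
/// db.remove(&1).unwrap();
/// assert!(db.get(&1).unwrap().is_none());
/// ```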
#[derive(Clone, Debug)]
pub struct TestDB<K, V> {
    pub rows: Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    pub name: String,
    _phantom: PhantomData<fn(K) -> V>,
}

impl<K, V> TestDB<K, V> {
    pub fn open() -> Self {
        TestDB {
            rows: Arc::new(RwLock::new(BTreeMap::new())),
            name: Alphanumeric.sample_string(&mut rand::thread_rng(), 16),
            _phantom: PhantomData,
        }
    }
    pub fn batch(&self) -> TestDBWriteBatch {
        TestDBWriteBatch::default()
    }
}

#[self_referencing(pub_extras)]
pub struct TestDBIter<'a, K, V> {
    pub rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<(K, V)>,
    pub direction: Direction,
}

#[self_referencing(pub_extras)]
pub struct TestDBKeys<'a, K> {
    rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<K>,
}

#[self_referencing(pub_extras)]
pub struct TestDBValues<'a, V> {
    rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<V>,
}

impl<K: DeserializeOwned, V: DeserializeOwned> Iterator for TestDBIter<'_, K, V> {
    type Item = Result<(K, V), TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        let config = bincode::DefaultOptions::new()
            .with_big_endian()
            .with_fixint_encoding();
        self.with_mut(|fields| {
            let resp = match fields.direction {
                Direction::Forward => fields.iter.next(),
                Direction::Reverse => panic!("Reverse iteration not supported in test db"),
            };
            if let Some((raw_key, raw_value)) = resp {
                let key: K = config.deserialize(raw_key).ok().unwrap();
                let value: V = bcs::from_bytes(raw_value).ok().unwrap();
                out = Some(Ok((key, value)));
            }
        });
        out
    }
}

impl<'a, K: Serialize, V> TestDBIter<'a, K, V> {
    /// Skips all the elements that are smaller than the given key,
    /// and either lands on the key or the first one greater than
    /// the key.
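    ///
    /// A rough usage sketch (illustrative; `db` is assumed to be a
    /// `TestDB<i32, String>` holding keys 1..=3):
    ///
    /// ```ignore
    /// // Skipping to 2 lands on 2 (or the first greater key if 2 were absent).
    /// let mut iter = db.safe_iter().skip_to(&2).unwrap();
    /// assert_eq!(iter.next(), Some(Ok((2, "2".to_string()))));
    /// ```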
    pub fn skip_to(mut self, key: &K) -> Result<Self, TypedStoreError> {
        self.with_mut(|fields| {
            let serialized_key = be_fix_int_ser(key).expect("serialization failed");
            // Keys are already stored in their `be_fix_int_ser` form, so compare the
            // raw stored key bytes against the serialized target. Peek through a
            // clone of the iterator so the first matching element is not consumed.
            while let Some((raw_key, _)) = fields.iter.clone().next() {
                if *raw_key >= serialized_key {
                    break;
                }
                fields.iter.next();
            }
        });
        Ok(self)
    }

    /// Moves the iterator to the element given or
    /// the one prior to it if it does not exist. If there is
    /// no element prior to it, it returns an empty iterator.
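    ///
    /// A rough usage sketch (illustrative; `db` is assumed to be a
    /// `TestDB<i32, String>` holding keys 1, 2 and 4):
    ///
    /// ```ignore
    /// // 3 is absent, so the iterator lands on the prior key, 2.
    /// let mut iter = db.safe_iter().skip_prior_to(&3).unwrap();
    /// assert_eq!(iter.next(), Some(Ok((2, "2".to_string()))));
    /// ```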
    pub fn skip_prior_to(mut self, key: &K) -> Result<Self, TypedStoreError> {
        self.with_mut(|fields| {
            let serialized_key = be_fix_int_ser(key).expect("serialization failed");
            // Keys are already stored in their `be_fix_int_ser` form. Position the
            // iterator so that the next call to `next` yields the last element whose
            // key is <= the target; if no such element exists, yield nothing.
            let mut lookahead = fields.iter.clone();
            match lookahead.next() {
                Some((first, _)) if *first <= serialized_key => {
                    // Advance while the element after the current front is still <= key.
                    while let Some((next, _)) = lookahead.next() {
                        if *next > serialized_key {
                            break;
                        }
                        fields.iter.next();
                    }
                }
                _ => {
                    // The first remaining element is already past the target (or the
                    // map is empty): there is no prior element, so drain the iterator.
                    while fields.iter.next().is_some() {}
                }
            }
        });
        Ok(self)
    }

    /// Seeks to the last key in the database (at this column family).
    pub fn skip_to_last(mut self) -> Self {
        self.with_mut(|fields| {
            // `last` instead of `next_back` because we actually want to consume `iter`
            fields.iter.last();
        });
        self
    }

    /// Reverses the direction of iteration and wraps the iterator in a new
    /// `RevIter`, whose `next` method yields elements from the end.
    ///
    /// Note: `TestDBIter::next` panics on reverse iteration, so the returned
    /// `RevIter` exists only to mirror the RocksDB iterator API.
    pub fn reverse(mut self) -> TestDBRevIter<'a, K, V> {
        self.with_mut(|fields| {
            *fields.direction = Direction::Reverse;
        });
        TestDBRevIter::new(self)
    }
}

/// An iterator whose direction is reversed relative to the original: it
/// consumes elements in the opposite order. Further manipulation
/// (e.g. re-reversing) is not possible.
pub struct TestDBRevIter<'a, K, V> {
    iter: TestDBIter<'a, K, V>,
}

impl<'a, K, V> TestDBRevIter<'a, K, V> {
    fn new(iter: TestDBIter<'a, K, V>) -> Self {
        Self { iter }
    }
}

impl<K: DeserializeOwned, V: DeserializeOwned> Iterator for TestDBRevIter<'_, K, V> {
    type Item = Result<(K, V), TypedStoreError>;

    /// Will give the next item backwards
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }
}

impl<K: DeserializeOwned> Iterator for TestDBKeys<'_, K> {
    type Item = Result<K, TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        self.with_mut(|fields| {
            let config = bincode::DefaultOptions::new()
                .with_big_endian()
                .with_fixint_encoding();
            if let Some((raw_key, _)) = fields.iter.next() {
                let key: K = config.deserialize(raw_key).ok().unwrap();
                out = Some(Ok(key));
            }
        });
        out
    }
}

impl<V: DeserializeOwned> Iterator for TestDBValues<'_, V> {
    type Item = Result<V, TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        self.with_mut(|fields| {
            if let Some((_, raw_value)) = fields.iter.next() {
                let value: V = bcs::from_bytes(raw_value).ok().unwrap();
                out = Some(Ok(value));
            }
        });
        out
    }
}

impl<'a, K, V> Map<'a, K, V> for TestDB<K, V>
where
    K: Serialize + DeserializeOwned,
    V: Serialize + DeserializeOwned,
{
    type Error = TypedStoreError;
    type Iterator = std::iter::Empty<(K, V)>;
    type SafeIterator = TestDBIter<'a, K, V>;

    fn contains_key(&self, key: &K) -> Result<bool, Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let locked = self.rows.read().unwrap();
        Ok(locked.contains_key(&raw_key))
    }

    fn get(&self, key: &K) -> Result<Option<V>, Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let locked = self.rows.read().unwrap();
        let res = locked.get(&raw_key);
        Ok(res.map(|raw_value| bcs::from_bytes(raw_value).ok().unwrap()))
    }

    fn insert(&self, key: &K, value: &V) -> Result<(), Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let raw_value = bcs::to_bytes(value).map_err(typed_store_err_from_bcs_err)?;
        let mut locked = self.rows.write().unwrap();
        locked.insert(raw_key, raw_value);
        Ok(())
    }

    fn remove(&self, key: &K) -> Result<(), Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let mut locked = self.rows.write().unwrap();
        locked.remove(&raw_key);
        Ok(())
    }

    fn unsafe_clear(&self) -> Result<(), Self::Error> {
        let mut locked = self.rows.write().unwrap();
        locked.clear();
        Ok(())
    }

    fn schedule_delete_all(&self) -> Result<(), TypedStoreError> {
        let mut locked = self.rows.write().unwrap();
        locked.clear();
        Ok(())
    }

    fn is_empty(&self) -> bool {
        let locked = self.rows.read().unwrap();
        locked.is_empty()
    }

    fn unbounded_iter(&'a self) -> Self::Iterator {
        unimplemented!("unimplemented API");
    }

    fn iter_with_bounds(
        &'a self,
        _lower_bound: Option<K>,
        _upper_bound: Option<K>,
    ) -> Self::Iterator {
        unimplemented!("unimplemented API");
    }

    fn range_iter(&'a self, _range: impl RangeBounds<K>) -> Self::Iterator {
        unimplemented!("unimplemented API");
    }

    fn safe_iter(&'a self) -> Self::SafeIterator {
        TestDBIterBuilder {
            rows: self.rows.read().unwrap(),
            iter_builder: |rows: &mut RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>| rows.iter(),
            phantom: PhantomData,
            direction: Direction::Forward,
        }
        .build()
    }

    fn safe_iter_with_bounds(
        &'a self,
        _lower_bound: Option<K>,
        _upper_bound: Option<K>,
    ) -> Self::SafeIterator {
        unimplemented!("unimplemented API");
    }

    fn safe_range_iter(&'a self, _range: impl RangeBounds<K>) -> Self::SafeIterator {
        unimplemented!("unimplemented API");
    }

    fn try_catch_up_with_primary(&self) -> Result<(), Self::Error> {
        Ok(())
    }
}

impl<J, K, U, V> TryExtend<(J, U)> for TestDB<K, V>
where
    J: Borrow<K>,
    U: Borrow<V>,
    K: Serialize,
    V: Serialize,
{
    type Error = TypedStoreError;

    fn try_extend<T>(&mut self, iter: &mut T) -> Result<(), Self::Error>
    where
        T: Iterator<Item = (J, U)>,
    {
        let mut wb = self.batch();
        wb.insert_batch(self, iter)?;
        wb.write()
    }

    fn try_extend_from_slice(&mut self, slice: &[(J, U)]) -> Result<(), Self::Error> {
        let slice_of_refs = slice.iter().map(|(k, v)| (k.borrow(), v.borrow()));
        let mut wb = self.batch();
        wb.insert_batch(self, slice_of_refs)?;
        wb.write()
    }
}

pub type DeleteBatchPayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    Vec<Vec<u8>>,
);
pub type DeleteRangePayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    (Vec<u8>, Vec<u8>),
);
pub type InsertBatchPayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    Vec<(Vec<u8>, Vec<u8>)>,
);
type DBAndName = (Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>, String);

pub enum WriteBatchOp {
    DeleteBatch(DeleteBatchPayload),
    DeleteRange(DeleteRangePayload),
    InsertBatch(InsertBatchPayload),
}

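/// An in-memory stand-in for a typed-store write batch: operations are queued
/// in order and applied to the underlying maps only when `write` is called.
///
/// A minimal usage sketch (illustrative; it mirrors `test_insert_batch` and
/// `test_delete_batch` in the tests below):
///
/// ```ignore
/// let db: TestDB<i32, String> = TestDB::open();
/// let mut wb = db.batch();
/// wb.insert_batch(&db, (1..4).map(|i| (i, i.to_string()))).unwrap();
/// wb.delete_batch(&db, [2]).unwrap();
/// wb.write().unwrap();
/// assert_eq!(db.get(&1).unwrap(), Some("1".to_string()));
/// assert_eq!(db.get(&2).unwrap(), None);
/// ```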
#[derive(Default)]
pub struct TestDBWriteBatch {
    pub ops: VecDeque<WriteBatchOp>,
}

#[self_referencing]
pub struct DBLocked {
    db: Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    #[borrows(db)]
    #[covariant]
    db_guard: RwLockWriteGuard<'this, BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl TestDBWriteBatch {
    pub fn write(self) -> Result<(), TypedStoreError> {
        let mut dbs: Vec<DBAndName> = self
            .ops
            .iter()
            .map(|op| match op {
                WriteBatchOp::DeleteBatch((db, name, _)) => (db.clone(), name.clone()),
                WriteBatchOp::DeleteRange((db, name, _)) => (db.clone(), name.clone()),
                WriteBatchOp::InsertBatch((db, name, _)) => (db.clone(), name.clone()),
            })
            .collect();
        dbs.sort_by_key(|(_k, v)| v.clone());
        dbs.dedup_by_key(|(_k, v)| v.clone());
        // lock all databases
        let mut db_locks = HashMap::new();
        dbs.iter().for_each(|(db, name)| {
            if !db_locks.contains_key(name) {
                db_locks.insert(
                    name.clone(),
                    DBLockedBuilder {
                        db: db.clone(),
                        db_guard_builder: |db: &Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>| {
                            db.write().unwrap()
                        },
                    }
                    .build(),
                );
            }
        });
        self.ops.iter().for_each(|op| match op {
            WriteBatchOp::DeleteBatch((_, id, keys)) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    keys.iter().for_each(|key| {
                        db.remove(key);
                    });
                });
            }
            WriteBatchOp::DeleteRange((_, id, (from, to))) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    db.retain(|k, _| k < from || k >= to);
                });
            }
            WriteBatchOp::InsertBatch((_, id, key_values)) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    key_values.iter().for_each(|(k, v)| {
                        db.insert(k.clone(), v.clone());
                    });
                });
            }
        });
        // unlock in the reverse order
        dbs.iter().rev().for_each(|(_db, id)| {
            if db_locks.contains_key(id) {
                db_locks.remove(id);
            }
        });
        Ok(())
    }
    /// Deletes a set of keys given as an iterator
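    ///
    /// A short sketch (illustrative; `wb` is a `TestDBWriteBatch` from
    /// `db.batch()`):
    ///
    /// ```ignore
    /// // Queue the removal of keys 1..=3; nothing is applied until `wb.write()`.
    /// wb.delete_batch(&db, 1..=3).unwrap();
    /// ```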
    pub fn delete_batch<J: Borrow<K>, K: Serialize, V>(
        &mut self,
        db: &TestDB<K, V>,
        purged_vals: impl IntoIterator<Item = J>,
    ) -> Result<(), TypedStoreError> {
        self.ops.push_back(WriteBatchOp::DeleteBatch((
            db.rows.clone(),
            db.name.clone(),
            purged_vals
                .into_iter()
                .map(|key| be_fix_int_ser(&key.borrow()).unwrap())
                .collect(),
        )));
        Ok(())
    }
    /// Deletes a range of keys between `from` (inclusive) and `to`
    /// (non-inclusive)
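    ///
    /// A short sketch (illustrative; assumes keys 0..=100 were inserted first,
    /// as in `test_delete_range` below):
    ///
    /// ```ignore
    /// wb.delete_range(&db, &50, &100).unwrap();
    /// wb.write().unwrap();
    /// assert!(!db.contains_key(&50).unwrap()); // `from` is inclusive
    /// assert!(db.contains_key(&100).unwrap()); // `to` is exclusive
    /// ```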
    pub fn delete_range<K: Serialize, V>(
        &mut self,
        db: &TestDB<K, V>,
        from: &K,
        to: &K,
    ) -> Result<(), TypedStoreError> {
        let raw_from = be_fix_int_ser(from).unwrap();
        let raw_to = be_fix_int_ser(to).unwrap();
        self.ops.push_back(WriteBatchOp::DeleteRange((
            db.rows.clone(),
            db.name.clone(),
            (raw_from, raw_to),
        )));
        Ok(())
    }
    /// Inserts a range of (key, value) pairs given as an iterator
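    ///
    /// A short sketch (illustrative; `wb` is a `TestDBWriteBatch` from
    /// `db.batch()`):
    ///
    /// ```ignore
    /// // Queue (key, value) pairs; they become visible only after `write`.
    /// wb.insert_batch(&db, (1..4).map(|i| (i, i.to_string()))).unwrap();
    /// wb.write().unwrap();
    /// ```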
    pub fn insert_batch<J: Borrow<K>, K: Serialize, U: Borrow<V>, V: Serialize>(
        &mut self,
        db: &TestDB<K, V>,
        new_vals: impl IntoIterator<Item = (J, U)>,
    ) -> Result<(), TypedStoreError> {
        self.ops.push_back(WriteBatchOp::InsertBatch((
            db.rows.clone(),
            db.name.clone(),
            new_vals
                .into_iter()
                .map(|(key, value)| {
                    (
                        be_fix_int_ser(&key.borrow()).unwrap(),
                        bcs::to_bytes(&value.borrow()).unwrap(),
                    )
                })
                .collect(),
        )));
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use crate::{Map, test_db::TestDB};

    #[test]
    fn test_contains_key() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert!(
            db.contains_key(&123456789)
                .expect("Failed to call contains key")
        );
        assert!(
            !db.contains_key(&000000000)
                .expect("Failed to call contains key")
        );
    }

    #[test]
    fn test_get() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert_eq!(
            Some("123456789".to_string()),
            db.get(&123456789).expect("Failed to get")
        );
        assert_eq!(None, db.get(&000000000).expect("Failed to get"));
    }

    #[test]
    fn test_multi_get() {
        let db = TestDB::open();
        db.insert(&123, &"123".to_string())
            .expect("Failed to insert");
        db.insert(&456, &"456".to_string())
            .expect("Failed to insert");

        let result = db.multi_get([123, 456, 789]).expect("Failed to multi get");

        assert_eq!(result.len(), 3);
        assert_eq!(result[0], Some("123".to_string()));
        assert_eq!(result[1], Some("456".to_string()));
        assert_eq!(result[2], None);
    }

    #[test]
    fn test_remove() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert!(db.get(&123456789).expect("Failed to get").is_some());

        db.remove(&123456789).expect("Failed to remove");
        assert!(db.get(&123456789).expect("Failed to get").is_none());
    }

    #[test]
    fn test_iter() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");

        let mut iter = db.safe_iter();
        assert_eq!(Some(Ok((123456789, "123456789".to_string()))), iter.next());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn test_iter_reverse() {
        // `TestDB` does not support reverse iteration (`TestDBIter::next` panics
        // on `Direction::Reverse`), so this test only checks forward order.
        let db = TestDB::open();
        db.insert(&1, &"1".to_string()).expect("Failed to insert");
        db.insert(&2, &"2".to_string()).expect("Failed to insert");
        db.insert(&3, &"3".to_string()).expect("Failed to insert");
        let mut iter = db.safe_iter();

        assert_eq!(Some(Ok((1, "1".to_string()))), iter.next());
        assert_eq!(Some(Ok((2, "2".to_string()))), iter.next());
        assert_eq!(Some(Ok((3, "3".to_string()))), iter.next());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn test_values() {
        let db = TestDB::open();

        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
    }

    #[test]
    fn test_insert_batch() {
        let db = TestDB::open();
        let keys_vals = (1..100).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals.clone())
            .expect("Failed to batch insert");
        wb.write().expect("Failed to execute batch");
        for (k, v) in keys_vals {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_insert_batch_across_cf() {
        let db_cf_1 = TestDB::open();
        let keys_vals_1 = (1..100).map(|i| (i, i.to_string()));

        let db_cf_2 = TestDB::open();
        let keys_vals_2 = (1000..1100).map(|i| (i, i.to_string()));

        let mut wb = db_cf_1.batch();
        wb.insert_batch(&db_cf_1, keys_vals_1.clone())
            .expect("Failed to batch insert");
        wb.insert_batch(&db_cf_2, keys_vals_2.clone())
            .expect("Failed to batch insert");
        wb.write().expect("Failed to execute batch");
        for (k, v) in keys_vals_1 {
            let val = db_cf_1.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }

        for (k, v) in keys_vals_2 {
            let val = db_cf_2.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_delete_batch() {
        let db: TestDB<i32, String> = TestDB::open();

        let keys_vals = (1..100).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        // delete the odd keys
        let deletion_keys = (1..100).step_by(2);
        wb.delete_batch(&db, deletion_keys)
            .expect("Failed to batch delete");

        wb.write().expect("Failed to execute batch");

        db.safe_iter().for_each(|item| {
            assert!(item.unwrap().0 % 2 == 0);
        });
    }

    #[test]
    fn test_delete_range() {
        let db: TestDB<i32, String> = TestDB::open();

        // Note that the last element is (100, "100".to_owned()) here
        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.delete_range(&db, &50, &100)
            .expect("Failed to delete range");

        wb.write().expect("Failed to execute batch");

        for k in 0..50 {
            assert!(db.contains_key(&k).expect("Failed to query legal key"),);
        }
        for k in 50..100 {
            assert!(!db.contains_key(&k).expect("Failed to query legal key"));
        }

        // range operator is not inclusive of to
        assert!(db.contains_key(&100).expect("Failed to query legal key"));
    }

    #[test]
    fn test_clear() {
        let db: TestDB<i32, String> = TestDB::open();

        // Test clear of empty map
        let _ = db.unsafe_clear();

        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.write().expect("Failed to execute batch");

        // Check we have multiple entries
        assert!(db.safe_iter().count() > 1);
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        // Clear again to ensure safety when clearing empty map
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        // Clear with one item
        let _ = db.insert(&1, &"e".to_string());
        assert_eq!(db.safe_iter().count(), 1);
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
    }

    #[test]
    fn test_is_empty() {
        let db: TestDB<i32, String> = TestDB::open();

        // Test empty map is truly empty
        assert!(db.is_empty());
        let _ = db.unsafe_clear();
        assert!(db.is_empty());

        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.write().expect("Failed to execute batch");

        // Check we have multiple entries and not empty
        assert!(db.safe_iter().count() > 1);
        assert!(!db.is_empty());

        // Clear again to ensure empty works after clearing
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        assert!(db.is_empty());
    }

    #[test]
    fn test_multi_insert() {
        // Init a DB
        let db: TestDB<i32, String> = TestDB::open();

        // Create kv pairs
        let keys_vals = (0..101).map(|i| (i, i.to_string()));

        db.multi_insert(keys_vals.clone())
            .expect("Failed to multi-insert");

        for (k, v) in keys_vals {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_multi_remove() {
        // Init a DB
        let db: TestDB<i32, String> = TestDB::open();

        // Create kv pairs
        let keys_vals = (0..101).map(|i| (i, i.to_string()));

        db.multi_insert(keys_vals.clone())
            .expect("Failed to multi-insert");

        // Check insertion
        for (k, v) in keys_vals.clone() {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }

        // Remove 50 items
        db.multi_remove(keys_vals.clone().map(|kv| kv.0).take(50))
            .expect("Failed to multi-remove");
        assert_eq!(db.safe_iter().count(), 101 - 50);

        // Check that the remaining are present
        for (k, v) in keys_vals.skip(50) {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }
}
771}