typed_store/test_db.rs

// Copyright (c) Mysten Labs, Inc.
// Modifications Copyright (c) 2024 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

#![allow(clippy::await_holding_lock)]

use std::{
    borrow::Borrow,
    collections::{BTreeMap, HashMap, VecDeque, btree_map::Iter},
    marker::PhantomData,
    ops::RangeBounds,
    sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard},
};

use bincode::Options;
use collectable::TryExtend;
use ouroboros::self_referencing;
use rand::distributions::{Alphanumeric, DistString};
use rocksdb::Direction;
use serde::{Serialize, de::DeserializeOwned};

use crate::{
    Map, TypedStoreError,
    rocks::{be_fix_int_ser, errors::typed_store_err_from_bcs_err},
};

/// An interface to a BTreeMap-backed sally database. This is mainly intended
/// for tests and for performing benchmark comparisons.
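///
/// A minimal usage sketch (illustrative only; the crate paths are assumed and
/// the snippet is not compiled as a doc test):
///
/// ```ignore
/// use typed_store::{Map, test_db::TestDB};
///
/// let db: TestDB<i32, String> = TestDB::open();
/// db.insert(&1, &"one".to_string()).unwrap();
/// assert_eq!(db.get(&1).unwrap(), Some("one".to_string()));
/// assert!(db.contains_key(&1).unwrap());
/// ```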
#[derive(Clone, Debug)]
pub struct TestDB<K, V> {
    pub rows: Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    pub name: String,
    _phantom: PhantomData<fn(K) -> V>,
}

impl<K, V> TestDB<K, V> {
    /// Opens a new, empty in-memory database with a random name.
    pub fn open() -> Self {
        TestDB {
            rows: Arc::new(RwLock::new(BTreeMap::new())),
            name: Alphanumeric.sample_string(&mut rand::thread_rng(), 16),
            _phantom: PhantomData,
        }
    }

    /// Returns an empty write batch to be filled and then applied with
    /// [`TestDBWriteBatch::write`].
    pub fn batch(&self) -> TestDBWriteBatch {
        TestDBWriteBatch::default()
    }
}

/// Raw iterator over a [`TestDB`] table. It holds the table's read lock for
/// its entire lifetime and decodes keys and values lazily in `next`.
#[self_referencing(pub_extras)]
pub struct TestDBIter<'a, K, V> {
    pub rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<(K, V)>,
    pub direction: Direction,
}

/// Iterator over the keys of a [`TestDB`] table.
#[self_referencing(pub_extras)]
pub struct TestDBKeys<'a, K> {
    rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<K>,
}

/// Iterator over the values of a [`TestDB`] table.
#[self_referencing(pub_extras)]
pub struct TestDBValues<'a, V> {
    rows: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>,
    #[borrows(mut rows)]
    #[covariant]
    pub iter: Iter<'this, Vec<u8>, Vec<u8>>,
    phantom: PhantomData<V>,
}

impl<K: DeserializeOwned, V: DeserializeOwned> Iterator for TestDBIter<'_, K, V> {
    type Item = Result<(K, V), TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        // Keys are encoded with big-endian fixed-width integers so that byte
        // order matches key order; values are BCS-encoded.
        let config = bincode::DefaultOptions::new()
            .with_big_endian()
            .with_fixint_encoding();
        self.with_mut(|fields| {
            let resp = match fields.direction {
                Direction::Forward => fields.iter.next(),
                Direction::Reverse => fields.iter.next_back(),
            };
            if let Some((raw_key, raw_value)) = resp {
                let key: K = config.deserialize(raw_key).ok().unwrap();
                let value: V = bcs::from_bytes(raw_value).ok().unwrap();
                out = Some(Ok((key, value)));
            }
        });
        out
    }
}

impl<'a, K: Serialize, V> TestDBIter<'a, K, V> {
    /// Skips all the elements that are smaller than the given key,
    /// and either lands on the key or the first one greater than
    /// the key.
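    ///
    /// Illustrative sketch (assumes the documented landing behavior; not
    /// compiled as a doc test):
    ///
    /// ```ignore
    /// // With keys 1, 2 and 4 present, skipping to 3 lands on 4.
    /// let mut iter = db.safe_iter().skip_to(&3)?;
    /// assert_eq!(iter.next().unwrap()?.0, 4);
    /// ```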
    pub fn skip_to(mut self, key: &K) -> Result<Self, TypedStoreError> {
        self.with_mut(|fields| {
            let serialized_key = be_fix_int_ser(key).expect("serialization failed");
            // Stored keys are already big-endian fix-int serialized, so their
            // raw bytes compare in the same order as the typed keys. Peek
            // through a cheap clone of the iterator so that the element we
            // land on is not consumed from the underlying iterator.
            while let Some((raw_key, _)) = fields.iter.clone().next() {
                if *raw_key >= serialized_key {
                    break;
                }
                fields.iter.next();
            }
        });
        Ok(self)
    }

    /// Moves the iterator to the given element, or to the one prior to it
    /// if the given element does not exist. If there is no element at or
    /// before the given key, the resulting iterator is empty.
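    ///
    /// Illustrative sketch (assumes the documented behavior; not compiled as
    /// a doc test):
    ///
    /// ```ignore
    /// // With keys 1, 2 and 4 present, skipping prior to 3 lands on 2.
    /// let mut iter = db.safe_iter().skip_prior_to(&3)?;
    /// assert_eq!(iter.next().unwrap()?.0, 2);
    /// ```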
    pub fn skip_prior_to(mut self, key: &K) -> Result<Self, TypedStoreError> {
        self.with_mut(|fields| {
            let serialized_key = be_fix_int_ser(key).expect("serialization failed");
            // If even the first remaining element is past the key, there is no
            // element at or before it: drain the iterator so it yields nothing.
            if matches!(
                fields.iter.clone().next(),
                Some((first, _)) if *first > serialized_key
            ) {
                while fields.iter.next().is_some() {}
                return;
            }
            // Otherwise advance while the element after the current one is
            // still at or before the key, so that we land on the key itself
            // or on the last element preceding it.
            loop {
                let mut lookahead = fields.iter.clone();
                lookahead.next();
                match lookahead.next() {
                    Some((next_key, _)) if *next_key <= serialized_key => {
                        fields.iter.next();
                    }
                    _ => break,
                }
            }
        });
        Ok(self)
    }

    /// Seeks to the last key in the database (at this column family).
    pub fn skip_to_last(mut self) -> Self {
        self.with_mut(|fields| {
            // `last` instead of `next_back` because we actually want to consume `iter`
            fields.iter.last();
        });
        self
    }

    /// Reverses the direction of the iteration and creates a new `RevIter`
    /// to consume. Every call to `next` will give the next element from
    /// the end.
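    ///
    /// Illustrative sketch (not compiled as a doc test):
    ///
    /// ```ignore
    /// // With keys 1, 2 and 3 present, a reversed iterator yields 3 first.
    /// let mut iter = db.safe_iter().reverse();
    /// assert_eq!(iter.next().unwrap()?.0, 3);
    /// ```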
    pub fn reverse(mut self) -> TestDBRevIter<'a, K, V> {
        self.with_mut(|fields| {
            *fields.direction = Direction::Reverse;
        });
        TestDBRevIter::new(self)
    }
}

/// An iterator whose direction is reversed relative to the original. The
/// `RevIter` hosts an iteration that consumes elements in the opposite
/// direction. No further manipulation (e.g. re-reversing) of the iterator
/// is possible.
pub struct TestDBRevIter<'a, K, V> {
    iter: TestDBIter<'a, K, V>,
}

impl<'a, K, V> TestDBRevIter<'a, K, V> {
    fn new(iter: TestDBIter<'a, K, V>) -> Self {
        Self { iter }
    }
}

impl<K: DeserializeOwned, V: DeserializeOwned> Iterator for TestDBRevIter<'_, K, V> {
    type Item = Result<(K, V), TypedStoreError>;

    /// Will give the next item backwards
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }
}

impl<K: DeserializeOwned> Iterator for TestDBKeys<'_, K> {
    type Item = Result<K, TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        self.with_mut(|fields| {
            let config = bincode::DefaultOptions::new()
                .with_big_endian()
                .with_fixint_encoding();
            if let Some((raw_key, _)) = fields.iter.next() {
                let key: K = config.deserialize(raw_key).ok().unwrap();
                out = Some(Ok(key));
            }
        });
        out
    }
}

impl<V: DeserializeOwned> Iterator for TestDBValues<'_, V> {
    type Item = Result<V, TypedStoreError>;

    fn next(&mut self) -> Option<Self::Item> {
        let mut out: Option<Self::Item> = None;
        self.with_mut(|fields| {
            if let Some((_, raw_value)) = fields.iter.next() {
                let value: V = bcs::from_bytes(raw_value).ok().unwrap();
                out = Some(Ok(value));
            }
        });
        out
    }
}

// Keys are stored in big-endian fix-int `bincode` form (so raw-byte ordering
// matches the ordering of the typed keys); values are stored as BCS bytes.
impl<'a, K, V> Map<'a, K, V> for TestDB<K, V>
where
    K: Serialize + DeserializeOwned,
    V: Serialize + DeserializeOwned,
{
    type Error = TypedStoreError;
    type SafeIterator = TestDBIter<'a, K, V>;

    fn contains_key(&self, key: &K) -> Result<bool, Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let locked = self.rows.read().unwrap();
        Ok(locked.contains_key(&raw_key))
    }

    fn get(&self, key: &K) -> Result<Option<V>, Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let locked = self.rows.read().unwrap();
        let res = locked.get(&raw_key);
        Ok(res.map(|raw_value| bcs::from_bytes(raw_value).ok().unwrap()))
    }

    fn insert(&self, key: &K, value: &V) -> Result<(), Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let raw_value = bcs::to_bytes(value).map_err(typed_store_err_from_bcs_err)?;
        let mut locked = self.rows.write().unwrap();
        locked.insert(raw_key, raw_value);
        Ok(())
    }

    fn remove(&self, key: &K) -> Result<(), Self::Error> {
        let raw_key = be_fix_int_ser(key)?;
        let mut locked = self.rows.write().unwrap();
        locked.remove(&raw_key);
        Ok(())
    }

    fn unsafe_clear(&self) -> Result<(), Self::Error> {
        let mut locked = self.rows.write().unwrap();
        locked.clear();
        Ok(())
    }

    fn schedule_delete_all(&self) -> Result<(), TypedStoreError> {
        let mut locked = self.rows.write().unwrap();
        locked.clear();
        Ok(())
    }

    fn is_empty(&self) -> bool {
        let locked = self.rows.read().unwrap();
        locked.is_empty()
    }

    fn safe_iter(&'a self) -> Self::SafeIterator {
        TestDBIterBuilder {
            rows: self.rows.read().unwrap(),
            iter_builder: |rows: &mut RwLockReadGuard<'a, BTreeMap<Vec<u8>, Vec<u8>>>| rows.iter(),
            phantom: PhantomData,
            direction: Direction::Forward,
        }
        .build()
    }

    fn safe_iter_with_bounds(
        &'a self,
        _lower_bound: Option<K>,
        _upper_bound: Option<K>,
    ) -> Self::SafeIterator {
        unimplemented!("unimplemented API");
    }

    fn safe_range_iter(&'a self, _range: impl RangeBounds<K>) -> Self::SafeIterator {
        unimplemented!("unimplemented API");
    }

    fn try_catch_up_with_primary(&self) -> Result<(), Self::Error> {
        Ok(())
    }
}

impl<J, K, U, V> TryExtend<(J, U)> for TestDB<K, V>
where
    J: Borrow<K>,
    U: Borrow<V>,
    K: Serialize,
    V: Serialize,
{
    type Error = TypedStoreError;

    fn try_extend<T>(&mut self, iter: &mut T) -> Result<(), Self::Error>
    where
        T: Iterator<Item = (J, U)>,
    {
        let mut wb = self.batch();
        wb.insert_batch(self, iter)?;
        wb.write()
    }

    fn try_extend_from_slice(&mut self, slice: &[(J, U)]) -> Result<(), Self::Error> {
        let slice_of_refs = slice.iter().map(|(k, v)| (k.borrow(), v.borrow()));
        let mut wb = self.batch();
        wb.insert_batch(self, slice_of_refs)?;
        wb.write()
    }
}

/// (table, table name, keys to delete)
pub type DeleteBatchPayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    Vec<Vec<u8>>,
);
/// (table, table name, (from, to) raw-key range with `to` exclusive)
pub type DeleteRangePayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    (Vec<u8>, Vec<u8>),
);
/// (table, table name, (key, value) pairs to insert)
pub type InsertBatchPayload = (
    Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    String,
    Vec<(Vec<u8>, Vec<u8>)>,
);
type DBAndName = (Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>, String);

/// A single operation queued in a [`TestDBWriteBatch`].
pub enum WriteBatchOp {
    DeleteBatch(DeleteBatchPayload),
    DeleteRange(DeleteRangePayload),
    InsertBatch(InsertBatchPayload),
}

/// A queue of write operations that is applied to the involved tables when
/// [`TestDBWriteBatch::write`] is called.
#[derive(Default)]
pub struct TestDBWriteBatch {
    pub ops: VecDeque<WriteBatchOp>,
}

/// A write lock held on one table, kept alongside the `Arc` that owns it so
/// the guard can be stored while a batch is applied.
#[self_referencing]
pub struct DBLocked {
    db: Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>,
    #[borrows(db)]
    #[covariant]
    db_guard: RwLockWriteGuard<'this, BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl TestDBWriteBatch {
    /// Applies all queued operations. The involved tables are write-locked in
    /// a canonical order (sorted by table name) so that batches spanning
    /// multiple tables cannot deadlock with each other.
    pub fn write(self) -> Result<(), TypedStoreError> {
        let mut dbs: Vec<DBAndName> = self
            .ops
            .iter()
            .map(|op| match op {
                WriteBatchOp::DeleteBatch((db, name, _)) => (db.clone(), name.clone()),
                WriteBatchOp::DeleteRange((db, name, _)) => (db.clone(), name.clone()),
                WriteBatchOp::InsertBatch((db, name, _)) => (db.clone(), name.clone()),
            })
            .collect();
        dbs.sort_by_key(|(_k, v)| v.clone());
        dbs.dedup_by_key(|(_k, v)| v.clone());
        // lock all databases
        let mut db_locks = HashMap::new();
        dbs.iter().for_each(|(db, name)| {
            if !db_locks.contains_key(name) {
                db_locks.insert(
                    name.clone(),
                    DBLockedBuilder {
                        db: db.clone(),
                        db_guard_builder: |db: &Arc<RwLock<BTreeMap<Vec<u8>, Vec<u8>>>>| {
                            db.write().unwrap()
                        },
                    }
                    .build(),
                );
            }
        });
        self.ops.iter().for_each(|op| match op {
            WriteBatchOp::DeleteBatch((_, id, keys)) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    keys.iter().for_each(|key| {
                        db.remove(key);
                    });
                });
            }
            WriteBatchOp::DeleteRange((_, id, (from, to))) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    // Keep only the keys outside of `[from, to)`.
                    db.retain(|k, _| k < from || k >= to);
                });
            }
            WriteBatchOp::InsertBatch((_, id, key_values)) => {
                let locked = db_locks.get_mut(id).unwrap();
                locked.with_db_guard_mut(|db| {
                    key_values.iter().for_each(|(k, v)| {
                        db.insert(k.clone(), v.clone());
                    });
                });
            }
        });
        // unlock in the reverse order
        dbs.iter().rev().for_each(|(_db, id)| {
            if db_locks.contains_key(id) {
                db_locks.remove(id);
            }
        });
        Ok(())
    }

    /// Deletes a set of keys given as an iterator.
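    ///
    /// Illustrative sketch (not compiled as a doc test):
    ///
    /// ```ignore
    /// let mut wb = db.batch();
    /// wb.delete_batch(&db, [1, 2, 3])?;
    /// wb.write()?;
    /// ```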
    pub fn delete_batch<J: Borrow<K>, K: Serialize, V>(
        &mut self,
        db: &TestDB<K, V>,
        purged_vals: impl IntoIterator<Item = J>,
    ) -> Result<(), TypedStoreError> {
        self.ops.push_back(WriteBatchOp::DeleteBatch((
            db.rows.clone(),
            db.name.clone(),
            purged_vals
                .into_iter()
                .map(|key| be_fix_int_ser(&key.borrow()).unwrap())
                .collect(),
        )));
        Ok(())
    }

    /// Deletes a range of keys between `from` (inclusive) and `to`
    /// (non-inclusive).
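    ///
    /// Illustrative sketch (not compiled as a doc test):
    ///
    /// ```ignore
    /// let mut wb = db.batch();
    /// // Removes keys 10..20; key 20 itself is kept.
    /// wb.delete_range(&db, &10, &20)?;
    /// wb.write()?;
    /// ```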
    pub fn delete_range<K: Serialize, V>(
        &mut self,
        db: &TestDB<K, V>,
        from: &K,
        to: &K,
    ) -> Result<(), TypedStoreError> {
        let raw_from = be_fix_int_ser(from).unwrap();
        let raw_to = be_fix_int_ser(to).unwrap();
        self.ops.push_back(WriteBatchOp::DeleteRange((
            db.rows.clone(),
            db.name.clone(),
            (raw_from, raw_to),
        )));
        Ok(())
    }

    /// Inserts the (key, value) pairs given as an iterator.
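    ///
    /// Illustrative sketch (not compiled as a doc test):
    ///
    /// ```ignore
    /// let mut wb = db.batch();
    /// wb.insert_batch(&db, [(1, "a".to_string()), (2, "b".to_string())])?;
    /// wb.write()?;
    /// ```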
    pub fn insert_batch<J: Borrow<K>, K: Serialize, U: Borrow<V>, V: Serialize>(
        &mut self,
        db: &TestDB<K, V>,
        new_vals: impl IntoIterator<Item = (J, U)>,
    ) -> Result<(), TypedStoreError> {
        self.ops.push_back(WriteBatchOp::InsertBatch((
            db.rows.clone(),
            db.name.clone(),
            new_vals
                .into_iter()
                .map(|(key, value)| {
                    (
                        be_fix_int_ser(&key.borrow()).unwrap(),
                        bcs::to_bytes(&value.borrow()).unwrap(),
                    )
                })
                .collect(),
        )));
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use crate::{Map, test_db::TestDB};

    #[test]
    fn test_contains_key() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert!(
            db.contains_key(&123456789)
                .expect("Failed to call contains key")
        );
        assert!(
            !db.contains_key(&000000000)
                .expect("Failed to call contains key")
        );
    }

    #[test]
    fn test_get() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert_eq!(
            Some("123456789".to_string()),
            db.get(&123456789).expect("Failed to get")
        );
        assert_eq!(None, db.get(&000000000).expect("Failed to get"));
    }

    #[test]
    fn test_multi_get() {
        let db = TestDB::open();
        db.insert(&123, &"123".to_string())
            .expect("Failed to insert");
        db.insert(&456, &"456".to_string())
            .expect("Failed to insert");

        let result = db.multi_get([123, 456, 789]).expect("Failed to multi get");

        assert_eq!(result.len(), 3);
        assert_eq!(result[0], Some("123".to_string()));
        assert_eq!(result[1], Some("456".to_string()));
        assert_eq!(result[2], None);
    }

    #[test]
    fn test_remove() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");
        assert!(db.get(&123456789).expect("Failed to get").is_some());

        db.remove(&123456789).expect("Failed to remove");
        assert!(db.get(&123456789).expect("Failed to get").is_none());
    }

    #[test]
    fn test_iter() {
        let db = TestDB::open();
        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");

        let mut iter = db.safe_iter();
        assert_eq!(Some(Ok((123456789, "123456789".to_string()))), iter.next());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn test_iter_reverse() {
        let db = TestDB::open();
        db.insert(&1, &"1".to_string()).expect("Failed to insert");
        db.insert(&2, &"2".to_string()).expect("Failed to insert");
        db.insert(&3, &"3".to_string()).expect("Failed to insert");
        let mut iter = db.safe_iter().reverse();

        assert_eq!(Some(Ok((3, "3".to_string()))), iter.next());
        assert_eq!(Some(Ok((2, "2".to_string()))), iter.next());
        assert_eq!(Some(Ok((1, "1".to_string()))), iter.next());
        assert_eq!(None, iter.next());
    }

    #[test]
    fn test_values() {
        let db = TestDB::open();

        db.insert(&123456789, &"123456789".to_string())
            .expect("Failed to insert");

        let values: Vec<_> = db
            .safe_iter()
            .map(|item| item.expect("Failed to iterate").1)
            .collect();
        assert_eq!(values, vec!["123456789".to_string()]);
    }

    #[test]
    fn test_insert_batch() {
        let db = TestDB::open();
        let keys_vals = (1..100).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals.clone())
            .expect("Failed to batch insert");
        wb.write().expect("Failed to execute batch");
        for (k, v) in keys_vals {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_insert_batch_across_cf() {
        let db_cf_1 = TestDB::open();
        let keys_vals_1 = (1..100).map(|i| (i, i.to_string()));

        let db_cf_2 = TestDB::open();
        let keys_vals_2 = (1000..1100).map(|i| (i, i.to_string()));

        let mut wb = db_cf_1.batch();
        wb.insert_batch(&db_cf_1, keys_vals_1.clone())
            .expect("Failed to batch insert");
        wb.insert_batch(&db_cf_2, keys_vals_2.clone())
            .expect("Failed to batch insert");
        wb.write().expect("Failed to execute batch");
        for (k, v) in keys_vals_1 {
            let val = db_cf_1.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }

        for (k, v) in keys_vals_2 {
            let val = db_cf_2.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_delete_batch() {
        let db: TestDB<i32, String> = TestDB::open();

        let keys_vals = (1..100).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        // delete the odd keys (1, 3, ..., 99)
        let deletion_keys = (1..100).step_by(2);
        wb.delete_batch(&db, deletion_keys)
            .expect("Failed to batch delete");

        wb.write().expect("Failed to execute batch");

        db.safe_iter().for_each(|item| {
            assert!(item.unwrap().0 % 2 == 0);
        });
    }

    #[test]
    fn test_delete_range() {
        let db: TestDB<i32, String> = TestDB::open();

        // Note that the last element is (100, "100".to_owned()) here
        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.delete_range(&db, &50, &100)
            .expect("Failed to delete range");

        wb.write().expect("Failed to execute batch");

        for k in 0..50 {
            assert!(db.contains_key(&k).expect("Failed to query legal key"));
        }
        for k in 50..100 {
            assert!(!db.contains_key(&k).expect("Failed to query legal key"));
        }

        // the deleted range is not inclusive of `to`
        assert!(db.contains_key(&100).expect("Failed to query legal key"));
    }

    #[test]
    fn test_clear() {
        let db: TestDB<i32, String> = TestDB::open();

        // Test clear of empty map
        let _ = db.unsafe_clear();

        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.write().expect("Failed to execute batch");

        // Check we have multiple entries
        assert!(db.safe_iter().count() > 1);
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        // Clear again to ensure safety when clearing empty map
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        // Clear with one item
        let _ = db.insert(&1, &"e".to_string());
        assert_eq!(db.safe_iter().count(), 1);
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
    }

    #[test]
    fn test_is_empty() {
        let db: TestDB<i32, String> = TestDB::open();

        // Test empty map is truly empty
        assert!(db.is_empty());
        let _ = db.unsafe_clear();
        assert!(db.is_empty());

        let keys_vals = (0..101).map(|i| (i, i.to_string()));
        let mut wb = db.batch();
        wb.insert_batch(&db, keys_vals)
            .expect("Failed to batch insert");

        wb.write().expect("Failed to execute batch");

        // Check we have multiple entries and not empty
        assert!(db.safe_iter().count() > 1);
        assert!(!db.is_empty());

        // Clear again to ensure empty works after clearing
        let _ = db.unsafe_clear();
        assert_eq!(db.safe_iter().count(), 0);
        assert!(db.is_empty());
    }

    #[test]
    fn test_multi_insert() {
        // Init a DB
        let db: TestDB<i32, String> = TestDB::open();

        // Create kv pairs
        let keys_vals = (0..101).map(|i| (i, i.to_string()));

        db.multi_insert(keys_vals.clone())
            .expect("Failed to multi-insert");

        for (k, v) in keys_vals {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }

    #[test]
    fn test_multi_remove() {
        // Init a DB
        let db: TestDB<i32, String> = TestDB::open();

        // Create kv pairs
        let keys_vals = (0..101).map(|i| (i, i.to_string()));

        db.multi_insert(keys_vals.clone())
            .expect("Failed to multi-insert");

        // Check insertion
        for (k, v) in keys_vals.clone() {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }

        // Remove 50 items
        db.multi_remove(keys_vals.clone().map(|kv| kv.0).take(50))
            .expect("Failed to multi-remove");
        assert_eq!(db.safe_iter().count(), 101 - 50);

        // Check that the remaining are present
        for (k, v) in keys_vals.skip(50) {
            let val = db.get(&k).expect("Failed to get inserted key");
            assert_eq!(Some(v), val);
        }
    }
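
    // Illustrative addition: exercises the `skip_to` seek API under the
    // behavior documented on `TestDBIter::skip_to` (landing on the requested
    // key, or on the first key greater than it).
    #[test]
    fn test_skip_to() {
        let db: TestDB<i32, String> = TestDB::open();
        for i in [1, 2, 4] {
            db.insert(&i, &i.to_string()).expect("Failed to insert");
        }

        // The key exists: land exactly on it.
        let mut iter = db.safe_iter().skip_to(&2).expect("Failed to skip");
        assert_eq!(Some(Ok((2, "2".to_string()))), iter.next());

        // The key is absent: land on the first key greater than it.
        let mut iter = db.safe_iter().skip_to(&3).expect("Failed to skip");
        assert_eq!(Some(Ok((4, "4".to_string()))), iter.next());
    }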
}