iota_types/
execution.rs

1// Copyright (c) Mysten Labs, Inc.
2// Modifications Copyright (c) 2024 IOTA Stiftung
3// SPDX-License-Identifier: Apache-2.0
4
5use std::collections::{BTreeMap, BTreeSet, HashSet};
6
7use move_core_types::language_storage::TypeTag;
8use once_cell::sync::Lazy;
9use serde::{Deserialize, Serialize};
10
11use crate::{
12    base_types::{ObjectID, ObjectRef, SequenceNumber},
13    digests::{ObjectDigest, TransactionDigest},
14    event::Event,
15    is_system_package,
16    object::{Data, Object, Owner},
17    storage::BackingPackageStore,
18    transaction::Argument,
19};
20
/// A type containing all of the information needed to work with a deleted
/// shared object in execution and when committing the execution effects of the
/// transaction. This holds:
/// 0. The object ID of the deleted shared object.
/// 1. The version of the shared object.
/// 2. Whether the object appeared as mutable (or owned) in the transaction, or
///    as a read-only shared object.
/// 3. The transaction digest of the previous transaction that used this shared
///    object mutably or took it by value.
pub type DeletedSharedObjectInfo = (ObjectID, SequenceNumber, bool, TransactionDigest);
31
/// A sequence of information about deleted shared objects in the transaction's
/// inputs. See [`DeletedSharedObjectInfo`] for the per-object layout.
pub type DeletedSharedObjects = Vec<DeletedSharedObjectInfo>;
35
/// The possible states of a shared object appearing in a transaction's inputs.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SharedInput {
    /// The shared object exists, identified by its full object reference
    /// (ID, version, digest).
    Existing(ObjectRef),
    /// The shared object was deleted; carries a [`DeletedSharedObjectInfo`].
    Deleted(DeletedSharedObjectInfo),
    /// The shared object's (ID, version) for a cancelled use.
    /// NOTE(review): exact cancellation semantics are inferred from the
    /// variant name — confirm against call sites.
    Cancelled((ObjectID, SequenceNumber)),
}
42
/// Metadata recorded for an object loaded dynamically during execution
/// (NOTE(review): "dynamically loaded" is inferred from the type name —
/// confirm against the execution layer's usage).
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct DynamicallyLoadedObjectMetadata {
    /// Version of the object at the time it was loaded.
    pub version: SequenceNumber,
    /// Digest of the object at the time it was loaded.
    pub digest: ObjectDigest,
    /// Ownership information of the object.
    pub owner: Owner,
    /// Storage rebate associated with the object.
    pub storage_rebate: u64,
    /// Digest of the last transaction that touched this object.
    pub previous_transaction: TransactionDigest,
}
51
/// View of the store necessary to produce the layouts of types.
pub trait TypeLayoutStore: BackingPackageStore {}
// Blanket impl: any type that can serve as a backing package store can also
// serve as a type-layout store; no extra methods are required.
impl<T> TypeLayoutStore for T where T: BackingPackageStore {}
55
/// Versioned container for the results of executing a transaction in Move.
#[derive(Debug)]
pub enum ExecutionResults {
    /// Result format used by iota-execution v1 and above.
    V1(ExecutionResultsV1),
}
60
/// Used by iota-execution v1 and above, to capture the execution results from
/// Move. The results represent the primitive information that can then be used
/// to construct transaction effects V1.
#[derive(Debug, Default)]
pub struct ExecutionResultsV1 {
    /// All objects written regardless of whether they were mutated, created, or
    /// unwrapped.
    pub written_objects: BTreeMap<ObjectID, Object>,
    /// All objects that existed prior to this transaction, and are modified in
    /// this transaction. This includes any type of modification, including
    /// mutated, wrapped and deleted objects.
    pub modified_objects: BTreeSet<ObjectID>,
    /// All object IDs created in this transaction.
    pub created_object_ids: BTreeSet<ObjectID>,
    /// All object IDs deleted in this transaction.
    /// No object ID should be in both `created_object_ids` and
    /// `deleted_object_ids`.
    pub deleted_object_ids: BTreeSet<ObjectID>,
    /// All Move events emitted in this transaction.
    pub user_events: Vec<Event>,
}
82
/// Result of a single execution returned to the caller: the raw bytes written
/// through mutable references and the raw bytes of each return value, each
/// paired with its Move type tag.
pub type ExecutionResult = (
    // mutable_reference_outputs
    Vec<(Argument, Vec<u8>, TypeTag)>,
    // return_values
    Vec<(Vec<u8>, TypeTag)>,
);
89
90impl ExecutionResultsV1 {
91    pub fn drop_writes(&mut self) {
92        self.written_objects.clear();
93        self.modified_objects.clear();
94        self.created_object_ids.clear();
95        self.deleted_object_ids.clear();
96        self.user_events.clear();
97    }
98
99    pub fn merge_results(&mut self, new_results: Self) {
100        self.written_objects.extend(new_results.written_objects);
101        self.modified_objects.extend(new_results.modified_objects);
102        self.created_object_ids
103            .extend(new_results.created_object_ids);
104        self.deleted_object_ids
105            .extend(new_results.deleted_object_ids);
106        self.user_events.extend(new_results.user_events);
107    }
108
109    pub fn update_version_and_previous_tx(
110        &mut self,
111        lamport_version: SequenceNumber,
112        prev_tx: TransactionDigest,
113        input_objects: &BTreeMap<ObjectID, Object>,
114    ) {
115        for (id, obj) in self.written_objects.iter_mut() {
116            // TODO: We can now get rid of the following logic by passing in lamport version
117            // into the execution layer, and create new objects using the lamport version
118            // directly.
119
120            // Update the version for the written object.
121            match &mut obj.data {
122                Data::Move(obj) => {
123                    // Move objects all get the transaction's lamport timestamp
124                    obj.increment_version_to(lamport_version);
125                }
126
127                Data::Package(pkg) => {
128                    // Modified packages get their version incremented (this is a special case that
129                    // only applies to system packages).  All other packages can only be created,
130                    // and they are left alone.
131                    if self.modified_objects.contains(id) {
132                        debug_assert!(is_system_package(*id));
133                        pkg.increment_version();
134                    }
135                }
136            }
137
138            // Record the version that the shared object was created at in its owner field.
139            // Note, this only works because shared objects must be created as
140            // shared (not created as owned in one transaction and later
141            // converted to shared in another).
142            if let Owner::Shared {
143                initial_shared_version,
144            } = &mut obj.owner
145            {
146                if self.created_object_ids.contains(id) {
147                    assert_eq!(
148                        *initial_shared_version,
149                        SequenceNumber::new(),
150                        "Initial version should be blank before this point for {id:?}",
151                    );
152                    *initial_shared_version = lamport_version;
153                }
154
155                // Update initial_shared_version for reshared objects
156                if let Some(Owner::Shared {
157                    initial_shared_version: previous_initial_shared_version,
158                }) = input_objects.get(id).map(|obj| &obj.owner)
159                {
160                    debug_assert!(!self.created_object_ids.contains(id));
161                    debug_assert!(!self.deleted_object_ids.contains(id));
162                    debug_assert!(
163                        *initial_shared_version == SequenceNumber::new()
164                            || *initial_shared_version == *previous_initial_shared_version
165                    );
166
167                    *initial_shared_version = *previous_initial_shared_version;
168                }
169            }
170
171            obj.previous_transaction = prev_tx;
172        }
173    }
174}
175
176/// If a transaction digest shows up in this list, when executing such
177/// transaction, we will always return `ExecutionError::CertificateDenied`
178/// without executing it (but still do gas smashing). Because this list is not
179/// gated by protocol version, there are a few important criteria for adding a
180/// digest to this list:
181/// 1. The certificate must be causing all validators to either panic or hang
182///    forever deterministically.
183/// 2. If we ever ship a fix to make it no longer panic or hang when executing
184///    such transaction, we must make sure the transaction is already in this
185///    list. Otherwise nodes running the newer version without these
186///    transactions in the list will generate forked result.
187///
188/// Below is a scenario of when we need to use this list:
189/// 1. We detect that a specific transaction is causing all validators to either
190///    panic or hang forever deterministically.
191/// 2. We push a CertificateDenyConfig to deny such transaction to all
192///    validators asap.
193/// 3. To make sure that all fullnodes are able to sync to the latest version,
194///    we need to add the transaction digest to this list as well asap, and ship
195///    this binary to all fullnodes, so that they can sync past this
196///    transaction.
197/// 4. We then can start fixing the issue, and ship the fix to all nodes.
198/// 5. Unfortunately, we can't remove the transaction digest from this list,
199///    because if we do so, any future node that sync from genesis will fork on
200///    this transaction. We may be able to remove it once we have stable
201///    snapshots and the binary has a minimum supported protocol version past
202///    the epoch.
203pub fn get_denied_certificates() -> &'static HashSet<TransactionDigest> {
204    static DENIED_CERTIFICATES: Lazy<HashSet<TransactionDigest>> = Lazy::new(|| HashSet::from([]));
205    Lazy::force(&DENIED_CERTIFICATES)
206}
207
208pub fn is_certificate_denied(
209    transaction_digest: &TransactionDigest,
210    certificate_deny_set: &HashSet<TransactionDigest>,
211) -> bool {
212    certificate_deny_set.contains(transaction_digest)
213        || get_denied_certificates().contains(transaction_digest)
214}