iota_types/execution.rs

1// Copyright (c) Mysten Labs, Inc.
2// Modifications Copyright (c) 2024 IOTA Stiftung
3// SPDX-License-Identifier: Apache-2.0
4
5use std::collections::{BTreeMap, BTreeSet, HashSet};
6
7use iota_sdk_types::TypeTag;
8use once_cell::sync::Lazy;
9use serde::{Deserialize, Serialize};
10
11use crate::{
12    base_types::{ObjectID, ObjectRef, SequenceNumber},
13    digests::{ObjectDigest, TransactionDigest},
14    event::Event,
15    object::{Data, MoveObjectExt, Object, Owner},
16    storage::BackingPackageStore,
17    transaction::Argument,
18};
19
/// A type containing all of the information needed to work with a deleted
/// shared object in execution and when committing the execution effects of the
/// transaction. The tuple fields, in order:
/// 0. The object ID of the deleted shared object.
/// 1. The version of the shared object.
/// 2. Whether the object appeared as mutable (or owned) in the transaction, or
///    as a read-only shared object.
/// 3. The transaction digest of the previous transaction that used this shared
///    object mutably or took it by value.
pub type DeletedSharedObjectInfo = (ObjectID, SequenceNumber, bool, TransactionDigest);
30
/// A sequence of information about deleted shared objects in the transaction's
/// inputs. See [`DeletedSharedObjectInfo`] for the per-object tuple layout.
pub type DeletedSharedObjects = Vec<DeletedSharedObjectInfo>;
34
/// The state of a shared object appearing in a transaction's input set.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SharedInput {
    /// The shared object exists; carries its full reference.
    Existing(ObjectRef),
    /// The shared object was deleted; see [`DeletedSharedObjectInfo`] for the
    /// tuple layout.
    Deleted(DeletedSharedObjectInfo),
    // NOTE(review): presumably represents a shared object whose use was
    // cancelled (ID plus a version) — confirm exact semantics with the
    // scheduling/execution code that constructs this variant.
    Cancelled((ObjectID, SequenceNumber)),
}
41
/// Metadata recorded for an object at the time it was loaded.
// NOTE(review): the name suggests this describes objects loaded dynamically
// (e.g. dynamic fields) during execution rather than static inputs — confirm
// against the execution layer that populates it.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct DynamicallyLoadedObjectMetadata {
    /// Version of the object when it was loaded.
    pub version: SequenceNumber,
    /// Digest of the object when it was loaded.
    pub digest: ObjectDigest,
    /// Owner of the object when it was loaded.
    pub owner: Owner,
    /// Storage rebate associated with the object.
    pub storage_rebate: u64,
    /// Digest of the last transaction that touched this object.
    pub previous_transaction: TransactionDigest,
}
50
/// View of the store necessary to produce the layouts of types.
pub trait TypeLayoutStore: BackingPackageStore {}
// Blanket impl: any backing package store can serve as a type-layout store.
impl<T> TypeLayoutStore for T where T: BackingPackageStore {}
54
/// Versioned container for execution results. Currently only one version
/// exists; the enum leaves room for future result formats.
#[derive(Debug)]
pub enum ExecutionResults {
    V1(ExecutionResultsV1),
}
59
/// Used by iota-execution v1 and above, to capture the execution results from
/// Move. The results represent the primitive information that can then be used
/// to construct transaction effects V1.
#[derive(Debug, Default)]
pub struct ExecutionResultsV1 {
    /// All objects written regardless of whether they were mutated, created, or
    /// unwrapped.
    pub written_objects: BTreeMap<ObjectID, Object>,
    /// All objects that existed prior to this transaction, and are modified in
    /// this transaction. This includes any type of modification, including
    /// mutated, wrapped and deleted objects.
    pub modified_objects: BTreeSet<ObjectID>,
    /// All object IDs created in this transaction.
    pub created_object_ids: BTreeSet<ObjectID>,
    /// All object IDs deleted in this transaction.
    /// No object ID should be in both created_object_ids and
    /// deleted_object_ids.
    pub deleted_object_ids: BTreeSet<ObjectID>,
    /// All Move events emitted in this transaction.
    pub user_events: Vec<Event>,
}
81
/// Result of a single executed command: the serialized values written through
/// mutable references, and the serialized return values, each paired with its
/// type tag.
pub type ExecutionResult = (
    // mutable_reference_outputs
    Vec<(Argument, Vec<u8>, TypeTag)>,
    // return_values
    Vec<(Vec<u8>, TypeTag)>,
);
88
89impl ExecutionResultsV1 {
90    pub fn drop_writes(&mut self) {
91        self.written_objects.clear();
92        self.modified_objects.clear();
93        self.created_object_ids.clear();
94        self.deleted_object_ids.clear();
95        self.user_events.clear();
96    }
97
98    pub fn merge_results(&mut self, new_results: Self) {
99        self.written_objects.extend(new_results.written_objects);
100        self.modified_objects.extend(new_results.modified_objects);
101        self.created_object_ids
102            .extend(new_results.created_object_ids);
103        self.deleted_object_ids
104            .extend(new_results.deleted_object_ids);
105        self.user_events.extend(new_results.user_events);
106    }
107
108    pub fn update_version_and_previous_tx(
109        &mut self,
110        lamport_version: SequenceNumber,
111        prev_tx: TransactionDigest,
112        input_objects: &BTreeMap<ObjectID, Object>,
113    ) {
114        for (id, obj) in self.written_objects.iter_mut() {
115            // TODO: We can now get rid of the following logic by passing in lamport version
116            // into the execution layer, and create new objects using the lamport version
117            // directly.
118
119            // Update the version for the written object.
120            match &mut obj.data {
121                Data::Struct(obj) => {
122                    // Move objects all get the transaction's lamport timestamp
123                    obj.increment_version_to(lamport_version);
124                }
125
126                Data::Package(pkg) => {
127                    // Modified packages get their version incremented (this is a special case that
128                    // only applies to system packages).  All other packages can only be created,
129                    // and they are left alone.
130                    if self.modified_objects.contains(id) {
131                        debug_assert!(id.is_system_package());
132                        pkg.increment_version()
133                            .expect("package version should never overflow");
134                    }
135                }
136            }
137
138            // Record the version that the shared object was created at in its owner field.
139            // Note, this only works because shared objects must be created as
140            // shared (not created as owned in one transaction and later
141            // converted to shared in another).
142            if let Owner::Shared(initial_shared_version) = &mut obj.owner {
143                if self.created_object_ids.contains(id) {
144                    assert_eq!(
145                        *initial_shared_version,
146                        SequenceNumber::default(),
147                        "Initial version should be blank before this point for {id:?}",
148                    );
149                    *initial_shared_version = lamport_version;
150                }
151
152                // Update initial_shared_version for reshared objects
153                if let Some(previous_initial_shared_version) = input_objects
154                    .get(id)
155                    .and_then(|obj| obj.owner.as_shared_opt())
156                {
157                    debug_assert!(!self.created_object_ids.contains(id));
158                    debug_assert!(!self.deleted_object_ids.contains(id));
159                    debug_assert!(
160                        *initial_shared_version == SequenceNumber::default()
161                            || *initial_shared_version == *previous_initial_shared_version
162                    );
163
164                    *initial_shared_version = *previous_initial_shared_version;
165                }
166            }
167
168            obj.previous_transaction = prev_tx;
169        }
170    }
171}
172
173/// If a transaction digest shows up in this list, when executing such
174/// transaction, we will always return `ExecutionError::CertificateDenied`
175/// without executing it (but still do gas smashing). Because this list is not
176/// gated by protocol version, there are a few important criteria for adding a
177/// digest to this list:
178/// 1. The certificate must be causing all validators to either panic or hang
179///    forever deterministically.
180/// 2. If we ever ship a fix to make it no longer panic or hang when executing
181///    such transaction, we must make sure the transaction is already in this
182///    list. Otherwise nodes running the newer version without these
183///    transactions in the list will generate forked result.
184///
185/// Below is a scenario of when we need to use this list:
186/// 1. We detect that a specific transaction is causing all validators to either
187///    panic or hang forever deterministically.
188/// 2. We push a CertificateDenyConfig to deny such transaction to all
189///    validators asap.
190/// 3. To make sure that all fullnodes are able to sync to the latest version,
191///    we need to add the transaction digest to this list as well asap, and ship
192///    this binary to all fullnodes, so that they can sync past this
193///    transaction.
194/// 4. We then can start fixing the issue, and ship the fix to all nodes.
195/// 5. Unfortunately, we can't remove the transaction digest from this list,
196///    because if we do so, any future node that sync from genesis will fork on
197///    this transaction. We may be able to remove it once we have stable
198///    snapshots and the binary has a minimum supported protocol version past
199///    the epoch.
200pub fn get_denied_certificates() -> &'static HashSet<TransactionDigest> {
201    static DENIED_CERTIFICATES: Lazy<HashSet<TransactionDigest>> = Lazy::new(|| HashSet::from([]));
202    Lazy::force(&DENIED_CERTIFICATES)
203}
204
205pub fn is_certificate_denied(
206    transaction_digest: &TransactionDigest,
207    certificate_deny_set: &HashSet<TransactionDigest>,
208) -> bool {
209    certificate_deny_set.contains(transaction_digest)
210        || get_denied_certificates().contains(transaction_digest)
211}