pub struct AccountsDb {
    pub accounts_index: AccountsIndex<AccountInfo, AccountInfo>,
    pub accounts_hash_complete_oldest_non_ancient_slot: RwLock<Slot>,
    pub ancient_append_vec_offset: Option<i64>,
    pub skip_initial_hash_calc: bool,
    pub(crate) storage: AccountStorage,
    pub(crate) assert_stakes_cache_consistency: bool,
    create_ancient_storage: CreateAncientStorage,
    pub accounts_cache: AccountsCache,
    write_cache_limit_bytes: Option<u64>,
    sender_bg_hasher: Option<Sender<CachedAccount>>,
    read_only_accounts_cache: ReadOnlyAccountsCache,
    recycle_stores: RwLock<RecycleStores>,
    pub next_id: AtomicAppendVecId,
    pub shrink_candidate_slots: Mutex<HashMap<Slot, Arc<AccountStorageEntry>>>,
    pub(crate) write_version: AtomicU64,
    pub(crate) paths: Vec<PathBuf>,
    full_accounts_hash_cache_path: PathBuf,
    incremental_accounts_hash_cache_path: PathBuf,
    temp_accounts_hash_cache_path: Option<TempDir>,
    pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
    pub(crate) temp_paths: Option<Vec<TempDir>>,
    file_size: u64,
    pub thread_pool: ThreadPool,
    pub thread_pool_clean: ThreadPool,
    bank_hash_stats: Mutex<HashMap<Slot, BankHashStats>>,
    accounts_delta_hashes: Mutex<HashMap<Slot, AccountsDeltaHash>>,
    accounts_hashes: Mutex<HashMap<Slot, (AccountsHash, u64)>>,
    incremental_accounts_hashes: Mutex<HashMap<Slot, (IncrementalAccountsHash, u64)>>,
    pub stats: AccountsStats,
    clean_accounts_stats: CleanAccountsStats,
    external_purge_slots_stats: PurgeStats,
    pub(crate) shrink_stats: ShrinkStats,
    pub(crate) shrink_ancient_stats: ShrinkAncientStats,
    pub cluster_type: Option<ClusterType>,
    pub account_indexes: AccountSecondaryIndexes,
    uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>,
    is_bank_drop_callback_enabled: AtomicBool,
    remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization,
    shrink_ratio: AccountShrinkThreshold,
    dirty_stores: DashMap<Slot, Arc<AccountStorageEntry>>,
    zero_lamport_accounts_to_purge_after_full_snapshot: DashSet<(Slot, Pubkey)>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    filler_accounts_config: FillerAccountsConfig,
    pub filler_account_suffix: Option<Pubkey>,
    pub(crate) active_stats: ActiveStats,
    pub filler_accounts_per_slot: AtomicU64,
    pub filler_account_slots_remaining: AtomicU64,
    pub(crate) verify_accounts_hash_in_bg: VerifyAccountsHashInBackground,
    pub(crate) log_dead_slots: AtomicBool,
    exhaustively_verify_refcounts: bool,
    pub epoch_accounts_hash_manager: EpochAccountsHashManager,
    pub(crate) bank_progress: BankCreationFreezingProgress,
}

Fields§

§accounts_index: AccountsIndex<AccountInfo, AccountInfo>

Keeps track of the index into the AppendVec on a per-slot basis

§accounts_hash_complete_oldest_non_ancient_slot: RwLock<Slot>

oldest slot that is not ancient, relative to the highest slot where accounts hash calculation has completed

§ancient_append_vec_offset: Option<i64>

Some(offset) iff we want to squash old append vecs together into ‘ancient append vecs’. Some(offset) means that for slots up to (max_slot - (slots_per_epoch - offset)), accounts are put into ancient append vecs.
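
A minimal sketch of that cutoff arithmetic (the function name, the u64 slot type, and the clamp-at-zero behavior are assumptions for illustration, not the crate's API):

```rust
/// Hypothetical illustration: slots at or below the returned cutoff are
/// candidates for squashing into an ancient append vec, per the formula above.
fn ancient_cutoff(max_slot: u64, slots_per_epoch: u64, offset: i64) -> u64 {
    // max_slot - (slots_per_epoch - offset), clamped so it never underflows
    let window = slots_per_epoch as i64 - offset;
    max_slot.saturating_sub(window.max(0) as u64)
}

fn main() {
    // With a 432_000-slot epoch and offset 100_000, slots up to
    // 1_000_000 - 332_000 = 668_000 would go into ancient append vecs.
    assert_eq!(ancient_cutoff(1_000_000, 432_000, 100_000), 668_000);
}
```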

§skip_initial_hash_calc: bool

true iff we want to skip the initial hash calculation on startup

§storage: AccountStorage

§assert_stakes_cache_consistency: bool

from AccountsDbConfig

§create_ancient_storage: CreateAncientStorage

from AccountsDbConfig

§accounts_cache: AccountsCache

§write_cache_limit_bytes: Option<u64>

§sender_bg_hasher: Option<Sender<CachedAccount>>

§read_only_accounts_cache: ReadOnlyAccountsCache

§recycle_stores: RwLock<RecycleStores>

§next_id: AtomicAppendVecId

distribute the accounts across storage lists

§shrink_candidate_slots: Mutex<HashMap<Slot, Arc<AccountStorageEntry>>>

Set of shrinkable stores organized by map of slot to append_vec_id

§write_version: AtomicU64

§paths: Vec<PathBuf>

Set of storage paths to pick from

§full_accounts_hash_cache_path: PathBuf

§incremental_accounts_hash_cache_path: PathBuf

§temp_accounts_hash_cache_path: Option<TempDir>

§shrink_paths: RwLock<Option<Vec<PathBuf>>>

§temp_paths: Option<Vec<TempDir>>

Directory of paths this accounts_db needs to hold/remove

§file_size: u64

Starting file size of appendvecs

§thread_pool: ThreadPool

Thread pool used for par_iter

§thread_pool_clean: ThreadPool

§bank_hash_stats: Mutex<HashMap<Slot, BankHashStats>>

§accounts_delta_hashes: Mutex<HashMap<Slot, AccountsDeltaHash>>

§accounts_hashes: Mutex<HashMap<Slot, (AccountsHash, u64)>>

§incremental_accounts_hashes: Mutex<HashMap<Slot, (IncrementalAccountsHash, u64)>>

§stats: AccountsStats

§clean_accounts_stats: CleanAccountsStats

§external_purge_slots_stats: PurgeStats

§shrink_stats: ShrinkStats

§shrink_ancient_stats: ShrinkAncientStats

§cluster_type: Option<ClusterType>

§account_indexes: AccountSecondaryIndexes

§uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>

Set of unique keys per slot which is used to drive clean_accounts. Generated by calculate_accounts_delta_hash.

§is_bank_drop_callback_enabled: AtomicBool

true if drop_callback is attached to the bank.

§remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization

Set of slots currently being flushed by flush_slot_cache() or removed by remove_unrooted_slot(). Used to ensure remove_unrooted_slots(slots) can safely clear the set of unrooted slots in slots.

§shrink_ratio: AccountShrinkThreshold

§dirty_stores: DashMap<Slot, Arc<AccountStorageEntry>>

Set of stores which are recently rooted or had accounts removed such that potentially a 0-lamport account update could be present which means we can remove the account from the index entirely.

§zero_lamport_accounts_to_purge_after_full_snapshot: DashSet<(Slot, Pubkey)>

Zero-lamport accounts that are not purged during clean because they need to stay alive for incremental snapshot support.

§accounts_update_notifier: Option<AccountsUpdateNotifier>

GeyserPlugin accounts update notifier

§filler_accounts_config: FillerAccountsConfig

§filler_account_suffix: Option<Pubkey>

§active_stats: ActiveStats

§filler_accounts_per_slot: AtomicU64

number of filler accounts to add for each slot

§filler_account_slots_remaining: AtomicU64

number of slots remaining where filler accounts should be added

§verify_accounts_hash_in_bg: VerifyAccountsHashInBackground

§log_dead_slots: AtomicBool

Used to disable logging dead slots during removal; allows disabling a noisy log.

§exhaustively_verify_refcounts: bool

debug feature to scan every append vec and verify refcounts are equal

§epoch_accounts_hash_manager: EpochAccountsHashManager

the full accounts hash calculation as of a predetermined block height ‘N’, to be included in the bank hash at a predetermined block height ‘M’. The cadence is once per epoch: all nodes calculate a full accounts hash as of a known slot calculated using ‘N’. Some time later (to allow for slow calculation time), the bank hash at a slot calculated using ‘M’ includes the full accounts hash. Thus, the state of all accounts on a validator is known to be correct at least once per epoch.

§bank_progress: BankCreationFreezingProgress

Implementations§

impl AccountsDb

pub fn notify_account_restore_from_snapshot(&self)

Notify the plugins of account data when AccountsDb is restored from a snapshot. The data is streamed in the reverse order of the slots so that an account is only streamed once. At a slot, if an account is updated multiple times, only the last write (with the highest write_version) is notified.
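
A minimal sketch of that streaming order, using &str in place of Pubkey and (pubkey, write_version) tuples in place of stored accounts (all names here are hypothetical):

```rust
use std::collections::{HashMap, HashSet};

/// Walk slots newest-first; within a slot keep only the highest write_version
/// per pubkey; across slots notify each pubkey at most once.
fn notify_on_restore(slots_newest_first: &[(u64, Vec<(&str, u64)>)]) {
    let mut notified: HashSet<String> = HashSet::new();
    for (slot, writes) in slots_newest_first {
        let mut last_write: HashMap<&str, u64> = HashMap::new();
        for (pubkey, write_version) in writes {
            let e = last_write.entry(*pubkey).or_insert(0);
            *e = (*e).max(*write_version);
        }
        for (pubkey, write_version) in last_write {
            if notified.insert(pubkey.to_string()) {
                println!("notify slot {slot}: {pubkey} (write_version {write_version})");
            }
        }
    }
}

fn main() {
    // "A" appears in both slots; only its write in the newer slot 20 is notified.
    notify_on_restore(&[(20, vec![("A", 7), ("B", 3)]), (10, vec![("A", 1)])]);
}
```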

pub fn notify_account_at_accounts_update<P>( &self, slot: Slot, account: &AccountSharedData, txn: &Option<&SanitizedTransaction>, pubkey: &Pubkey, write_version_producer: &mut P )where P: Iterator<Item = u64>,

fn notify_accounts_in_slot( &self, slot: Slot, notified_accounts: &mut HashSet<Pubkey>, notify_stats: &mut GeyserPluginNotifyAtSnapshotRestoreStats )

fn notify_filtered_accounts( &self, slot: Slot, notified_accounts: &mut HashSet<Pubkey>, accounts_to_stream: HashMap<Pubkey, StoredAccountMeta<'_>>, notify_stats: &mut GeyserPluginNotifyAtSnapshotRestoreStats )

impl AccountsDb

pub const ACCOUNTS_HASH_CACHE_DIR: &str = "accounts_hash_cache"

pub fn default_for_tests() -> Self

fn default_with_accounts_index( accounts_index: AccountsIndex<AccountInfo, AccountInfo>, accounts_hash_cache_path: Option<PathBuf> ) -> Self

pub fn new_for_tests(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self

pub fn new_for_tests_with_caching( paths: Vec<PathBuf>, cluster_type: &ClusterType ) -> Self

pub fn new_with_config( paths: Vec<PathBuf>, cluster_type: &ClusterType, account_indexes: AccountSecondaryIndexes, shrink_ratio: AccountShrinkThreshold, accounts_db_config: Option<AccountsDbConfig>, accounts_update_notifier: Option<AccountsUpdateNotifier>, exit: &Arc<AtomicBool> ) -> Self

fn init_gradual_filler_accounts(&self, slots_per_epoch: Slot)

Gradual means filler accounts will be added over the course of an epoch, during cache flush. This is in contrast to adding all the filler accounts immediately before the validator starts.

pub fn set_shrink_paths(&self, paths: Vec<PathBuf>)

pub fn file_size(&self) -> u64

pub fn new_single_for_tests() -> Self

pub fn new_single_for_tests_with_caching() -> Self

pub fn new_single_for_tests_with_secondary_indexes( secondary_indexes: AccountSecondaryIndexes ) -> Self

fn next_id(&self) -> AppendVecId

fn new_storage_entry( &self, slot: Slot, path: &Path, size: u64 ) -> AccountStorageEntry

pub fn expected_cluster_type(&self) -> ClusterType

fn clean_accounts_older_than_root( &self, purges: Vec<Pubkey>, max_clean_root_inclusive: Option<Slot>, ancient_account_cleans: &AtomicU64 ) -> ((HashMap<Pubkey, HashSet<Slot>>, HashMap<Slot, HashSet<usize>>), HashSet<Pubkey>)

Reclaim older states of accounts older than max_clean_root_inclusive for AccountsDb bloat mitigation. Any accounts which are removed from the accounts index are returned in PubkeysRemovedFromAccountsIndex. These should NOT be unref’d later from the accounts index.

fn do_reset_uncleaned_roots(&self, max_clean_root: Option<Slot>)

fn calc_delete_dependencies( purges: &HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)>, store_counts: &mut HashMap<Slot, (usize, HashSet<Pubkey>)>, min_slot: Option<Slot> )

Increment store_counts to non-zero for all stores that cannot be deleted. A store cannot be deleted if:

  1. one of the pubkeys in the store has account info to a store whose store count is not going to zero
  2. a pubkey we were planning to remove is not removing all stores that contain the account

fn background_hasher(receiver: Receiver<CachedAccount>)

fn start_background_hasher(&mut self)

pub(crate) fn purge_keys_exact<'a, C>( &'a self, pubkey_to_slot_set: impl Iterator<Item = &'a (Pubkey, C)> ) -> (Vec<(Slot, AccountInfo)>, HashSet<Pubkey>)where C: Contains<'a, Slot> + 'a,

fn max_clean_root(&self, proposed_clean_root: Option<Slot>) -> Option<Slot>

fn get_oldest_slot_within_one_epoch_prior( slot: Slot, epoch_schedule: &EpochSchedule ) -> Slot

Return slot - slots_in_epoch + 1. The resulting slot is within one epoch length of slot.

pub fn notify_accounts_hash_calculated_complete( &self, completed_slot: Slot, epoch_schedule: &EpochSchedule )

Hash calc is completed as of completed_slot, so any process that wants to take action on really old slots can now proceed, up to completed_slot - slots_per_epoch.

fn get_accounts_hash_complete_oldest_non_ancient_slot(&self) -> Slot

get the oldest slot that is within one epoch of the highest slot that has been used for hash calculation. The slot will have been offset by self.ancient_append_vec_offset

fn collect_uncleaned_slots_up_to_slot( &self, max_slot_inclusive: Slot ) -> Vec<Slot>

Collect all the uncleaned slots, up to a max slot

Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot.

fn remove_uncleaned_slots_and_collect_pubkeys( &self, uncleaned_slots: Vec<Slot> ) -> Vec<Vec<Pubkey>>

Remove slots from uncleaned_pubkeys and collect all pubkeys

For each slot in the list of uncleaned slots, remove it from the uncleaned_pubkeys Map and collect all the pubkeys to return.

fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot( &self, max_slot_inclusive: Slot ) -> Vec<Vec<Pubkey>>

Remove uncleaned slots, up to a maximum slot, and return the collected pubkeys

fn construct_candidate_clean_keys( &self, max_clean_root_inclusive: Option<Slot>, is_startup: bool, last_full_snapshot_slot: Option<Slot>, timings: &mut CleanKeyTimings ) -> (Vec<Pubkey>, Option<Slot>)

Construct a vec of pubkeys for cleaning from:

  • uncleaned_pubkeys - the delta set of updated pubkeys in rooted slots from the last clean
  • dirty_stores - the set of stores which had accounts removed or were recently rooted

Returns the minimum slot we encountered.

pub fn clean_accounts_for_tests(&self)

Call clean_accounts() with the common parameters that tests/benches use.

fn exhaustively_verify_refcounts(&self, max_slot_inclusive: Option<Slot>)

Called with a CLI argument to verify refcounts are correct on all accounts. This is very slow.

pub fn clean_accounts( &self, max_clean_root_inclusive: Option<Slot>, is_startup: bool, last_full_snapshot_slot: Option<Slot> )

fn handle_reclaims<'a, I>( &'a self, reclaims: Option<I>, expected_single_dead_slot: Option<Slot>, purge_stats_and_reclaim_result: Option<(&PurgeStats, &mut (HashMap<Pubkey, HashSet<Slot>>, HashMap<Slot, HashSet<usize>>))>, reset_accounts: bool, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )where I: Iterator<Item = &'a (Slot, AccountInfo)>,

Removes the accounts in the input reclaims from the tracked “count” of their corresponding storage entries. Note this does not actually free the memory from the storage entries until all the storage entries for a given slot S are empty, at which point process_dead_slots will remove all the storage entries for S.

Arguments
  • reclaims - The accounts to remove from storage entries’ “count”. Note here that we should not remove cache entries, only entries for accounts actually stored in a storage entry.

  • expected_single_dead_slot - A correctness assertion. If this is equal to Some(S), then the function will check that the only slot being cleaned up in reclaims is the slot == S. This is true for instance when handle_reclaims is called from store or slot shrinking, as those should only touch the slot they are currently storing to or shrinking.

  • purge_stats_and_reclaim_result - Option containing purge_stats and reclaim_result. purge_stats are stats used to track performance of purging dead slots. reclaim_result contains information about accounts that were removed from storage; it does not include accounts that were removed from the cache. If purge_stats_and_reclaim_result.is_none(), this implies there can be no dead slots as a result of this call, and the function will check that no slots are cleaned up/removed via process_dead_slots. For instance, on store, no slots should be cleaned up, but during the background clean, clean_accounts purges accounts from old rooted slots, so outdated slots may be removed.

  • reset_accounts - Reset the append_vec store when the store is dead (count==0). From the clean and shrink paths it should be false, since there may be an in-progress hash operation and the stores may hold accounts that need to be unref’ed.

  • pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.

fn filter_zero_lamport_clean_for_incremental_snapshots( &self, max_clean_root_inclusive: Option<Slot>, last_full_snapshot_slot: Option<Slot>, store_counts: &HashMap<Slot, (usize, HashSet<Pubkey>)>, purges_zero_lamports: &mut HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)> )

During clean, some zero-lamport accounts that are marked for purge should not actually get purged. Filter out those accounts here by removing them from ‘purges_zero_lamports’

When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher than the last full snapshot slot. This is to protect against the following scenario:

A full snapshot is taken, including account 'alpha' with a non-zero balance.  In a later slot,
alpha's lamports go to zero.  Eventually, cleaning runs.  Without this change,
alpha would be cleaned up and removed completely. Finally, an incremental snapshot is taken.

Later, the incremental and full snapshots are used to rebuild the bank and accounts
database (e.g. if the node restarts).  The full snapshot _does_ contain alpha
and its balance is non-zero.  However, since alpha was cleaned up in a slot after the full
snapshot slot (due to having zero lamports), the incremental snapshot would not contain alpha.
Thus, the accounts database will contain the old, incorrect info for alpha with a non-zero
balance.  Very bad!

This filtering step can be skipped if there is no last_full_snapshot_slot, or if the max_clean_root_inclusive is less-than-or-equal-to the last_full_snapshot_slot.
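
A minimal sketch of just that skip condition as a standalone predicate (the name is hypothetical; slots are plain u64):

```rust
/// Returns true when the zero-lamport filtering described above is needed:
/// a full snapshot exists and cleaning may reach slots beyond it.
fn needs_zero_lamport_filtering(
    max_clean_root_inclusive: Option<u64>,
    last_full_snapshot_slot: Option<u64>,
) -> bool {
    match (last_full_snapshot_slot, max_clean_root_inclusive) {
        // no full snapshot yet: nothing to protect
        (None, _) => false,
        // cleaning stops at or below the full snapshot slot: safe to purge
        (Some(full), Some(max_root)) if max_root <= full => false,
        _ => true,
    }
}

fn main() {
    assert!(!needs_zero_lamport_filtering(Some(100), None)); // no full snapshot
    assert!(!needs_zero_lamport_filtering(Some(100), Some(200))); // clean stops early
    assert!(needs_zero_lamport_filtering(Some(300), Some(200))); // clean passes snapshot
    assert!(needs_zero_lamport_filtering(None, Some(200))); // unbounded clean
}
```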

fn process_dead_slots( &self, dead_slots: &HashSet<Slot>, purged_account_slots: Option<&mut HashMap<Pubkey, HashSet<Slot>>>, purge_stats: &PurgeStats, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.

fn load_accounts_index_for_shrink<'a, T: ShrinkCollectRefs<'a>>( &self, accounts: &'a [StoredAccountMeta<'a>], stats: &ShrinkStats, slot_to_shrink: Slot ) -> LoadAccountsIndexForShrink<'a, T>

Load the account index entry for the first count items in accounts. Store a reference to all alive accounts in alive_accounts. Unref, and optionally store a reference in unrefed_pubkeys, for all pubkeys that are in the index but dead. Return the sum of account sizes for all alive accounts.

pub(crate) fn get_unique_accounts_from_storage<'a>( &self, store: &'a Arc<AccountStorageEntry> ) -> GetUniqueAccountsResult<'a>

Get all accounts in the storage passed in. For duplicate pubkeys, the account with the highest write_version is returned.

pub(crate) fn get_unique_accounts_from_storage_for_shrink<'a>( &self, store: &'a Arc<AccountStorageEntry>, stats: &ShrinkStats ) -> GetUniqueAccountsResult<'a>

pub(crate) fn shrink_collect<'a: 'b, 'b, T: ShrinkCollectRefs<'b>>( &self, store: &'a Arc<AccountStorageEntry>, unique_accounts: &'b GetUniqueAccountsResult<'b>, stats: &ShrinkStats ) -> ShrinkCollect<'b, T>

Shared code for shrinking normal slots and combining into ancient append vecs. Note that unique_accounts is passed by ref so we can return references to data within it, avoiding self-references.

pub(crate) fn remove_old_stores_shrink<'a, T: ShrinkCollectRefs<'a>>( &self, shrink_collect: &ShrinkCollect<'a, T>, stats: &ShrinkStats, shrink_in_progress: Option<ShrinkInProgress<'_>>, shrink_can_be_active: bool )

Common code from shrink and combine_ancient_slots: get rid of all original store_ids in the slot.

fn do_shrink_slot_store(&self, slot: Slot, store: &Arc<AccountStorageEntry>)

pub(crate) fn update_shrink_stats( shrink_stats: &ShrinkStats, stats_sub: ShrinkStatsSub )

pub(crate) fn mark_dirty_dead_stores( &self, slot: Slot, add_dirty_stores: bool, shrink_in_progress: Option<ShrinkInProgress<'_>>, shrink_can_be_active: bool ) -> Vec<Arc<AccountStorageEntry>>

Get stores for slot. Drop shrink_in_progress, which will cause the old store to be removed from the storage map. For shrink_in_progress.old_storage, which is not retained, insert in dead_storages and optionally dirty_stores. This is the end of the life cycle of shrink_in_progress.

pub(crate) fn drop_or_recycle_stores( &self, dead_storages: Vec<Arc<AccountStorageEntry>>, stats: &ShrinkStats )

pub(crate) fn get_store_for_shrink( &self, slot: Slot, aligned_total: u64 ) -> ShrinkInProgress<'_>

return a store that can contain ‘aligned_total’ bytes

fn shrink_slot_forced(&self, slot: Slot)

fn all_slots_in_storage(&self) -> Vec<Slot>

fn select_candidates_by_total_usage( shrink_slots: &HashMap<Slot, Arc<AccountStorageEntry>>, shrink_ratio: f64, oldest_non_ancient_slot: Option<Slot> ) -> (HashMap<Slot, Arc<AccountStorageEntry>>, HashMap<Slot, Arc<AccountStorageEntry>>)

Given the input ShrinkCandidates, this function sorts the stores by their alive ratio in increasing order with the most sparse entries in the front. It will then simulate the shrinking by working on the most sparse entries first and if the overall alive ratio is achieved, it will stop and return the filtered-down candidates and the candidates which are skipped in this round and might be eligible for the future shrink.
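
A minimal sketch of that selection strategy, reducing each store to an (alive_bytes, total_bytes) pair; the names, the index-based return value, and tie-breaking are assumptions, not the crate's implementation:

```rust
/// Sparsest stores first; stop selecting once the simulated overall alive
/// ratio reaches `target_ratio`; everything else is deferred to a later round.
fn select_by_total_usage(stores: &[(u64, u64)], target_ratio: f64) -> (Vec<usize>, Vec<usize>) {
    let mut order: Vec<usize> = (0..stores.len()).collect();
    // lowest alive ratio (most sparse) first
    order.sort_by(|&a, &b| {
        let ra = stores[a].0 as f64 / stores[a].1 as f64;
        let rb = stores[b].0 as f64 / stores[b].1 as f64;
        ra.partial_cmp(&rb).unwrap()
    });
    let alive: u64 = stores.iter().map(|s| s.0).sum();
    let mut total: u64 = stores.iter().map(|s| s.1).sum();
    let (mut selected, mut skipped) = (Vec::new(), Vec::new());
    for idx in order {
        if alive as f64 / total as f64 >= target_ratio {
            skipped.push(idx); // overall alive ratio already achieved
        } else {
            // simulate the shrink: this store now occupies only its alive bytes
            total -= stores[idx].1 - stores[idx].0;
            selected.push(idx);
        }
    }
    (selected, skipped)
}

fn main() {
    // store 1 is sparsest (10% alive), so it is selected first
    let (selected, skipped) = select_by_total_usage(&[(90, 100), (10, 100)], 0.8);
    assert_eq!((selected, skipped), (vec![1], vec![0]));
}
```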

fn get_roots_less_than(&self, slot: Slot) -> Vec<Slot>

fn get_prior_root(&self, slot: Slot) -> Option<Slot>

fn get_sorted_potential_ancient_slots(&self) -> Vec<Slot>

Return all slots that are more than one epoch old and thus could already be an ancient append vec, or which could need to be combined into a new or existing ancient append vec. The offset is used to combine newer slots than we normally would; this is designed to be used for testing.

fn shrink_ancient_slots(&self)

Get a sorted list of slots older than an epoch and squash those slots into ancient append vecs.

fn get_keys_to_unref_ancient<'a>( accounts: &'a [&StoredAccountMeta<'_>], existing_ancient_pubkeys: &mut HashSet<Pubkey> ) -> HashSet<&'a Pubkey>

accounts: accounts that exist in the current slot we are combining into a different ancient slot. existing_ancient_pubkeys: pubkeys that currently exist in the ancient append vec slot. Returns the pubkeys in accounts that are already in existing_ancient_pubkeys. Also updates existing_ancient_pubkeys to include all pubkeys in accounts, since they will soon be written into the ancient slot.
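
A minimal sketch of that set bookkeeping, with u32 standing in for Pubkey (the function name is hypothetical):

```rust
use std::collections::HashSet;

/// Return the keys in `accounts` already present in the ancient storage, and
/// fold all of `accounts` into `existing` since they are about to be written.
fn keys_to_unref(accounts: &[u32], existing: &mut HashSet<u32>) -> HashSet<u32> {
    let already_there: HashSet<u32> =
        accounts.iter().copied().filter(|k| existing.contains(k)).collect();
    existing.extend(accounts.iter().copied());
    already_there
}

fn main() {
    let mut existing: HashSet<u32> = [1, 2].into_iter().collect();
    let dup = keys_to_unref(&[2, 3], &mut existing);
    assert!(dup.contains(&2) && !dup.contains(&3)); // only 2 was already ancient
    assert!(existing.contains(&3)); // 3 is now tracked as ancient
}
```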

fn unref_accounts_already_in_storage( &self, accounts: &[&StoredAccountMeta<'_>], existing_ancient_pubkeys: &mut HashSet<Pubkey> )

‘accounts’ are about to be appended to an ancient append vec. That ancient append vec may already have some accounts. Unref each account in ‘accounts’ that already exists in ‘existing_ancient_pubkeys’. As a side effect, on exit, ‘existing_ancient_pubkeys’ will now contain all pubkeys in ‘accounts’.

fn get_storage_to_move_to_ancient_append_vec( &self, slot: Slot, current_ancient: &mut CurrentAncientAppendVec, can_randomly_shrink: bool ) -> Option<Arc<AccountStorageEntry>>

Get the storage from slot to squash, or None if this slot should be skipped. A side effect could be updating current_ancient.

fn should_move_to_ancient_append_vec( &self, storage: &Arc<AccountStorageEntry>, current_ancient: &mut CurrentAncientAppendVec, slot: Slot, can_randomly_shrink: bool ) -> bool

Return true if the accounts in this slot should be moved to an ancient append vec; otherwise, return false and the caller can skip this slot. A side effect could be updating current_ancient. can_randomly_shrink: true if ancient append vecs that otherwise don’t qualify to be shrunk can be randomly shrunk. This is convenient for a running system but not useful for testing.

fn combine_ancient_slots( &self, sorted_slots: Vec<Slot>, can_randomly_shrink: bool )

Combine all account data from storages in ‘sorted_slots’ into ancient append vecs. This keeps us from accumulating append vecs for each slot older than an epoch.

fn combine_one_store_into_ancient( &self, slot: Slot, old_storage: &Arc<AccountStorageEntry>, current_ancient: &mut CurrentAncientAppendVec, ancient_slot_pubkeys: &mut AncientSlotPubkeys, dropped_roots: &mut Vec<Slot> )

put entire alive contents of ‘old_storage’ into the current ancient append vec or a newly created ancient append vec

pub(crate) fn handle_dropped_roots_for_ancient( &self, dropped_roots: impl Iterator<Item = Slot> )

each slot in ‘dropped_roots’ has been combined into an ancient append vec. We are done with the slot now forever.

fn add_uncleaned_pubkeys_after_shrink( &self, slot: Slot, pubkeys: impl Iterator<Item = Pubkey> )

Add all pubkeys into the set of pubkeys that are ‘uncleaned’, associated with slot. Clean will visit these pubkeys the next time it runs.

pub fn shrink_candidate_slots(&self) -> usize

pub fn shrink_all_slots( &self, is_startup: bool, last_full_snapshot_slot: Option<Slot> )

pub fn scan_accounts<F>( &self, ancestors: &Ancestors, bank_id: BankId, scan_func: F, config: &ScanConfig ) -> ScanResult<()>where F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),

pub fn unchecked_scan_accounts<F>( &self, metric_name: &'static str, ancestors: &Ancestors, scan_func: F, config: &ScanConfig )where F: FnMut(&Pubkey, LoadedAccount<'_>, Slot),

pub fn range_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, range: R, config: &ScanConfig, scan_func: F )where F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>), R: RangeBounds<Pubkey> + Debug,

Only guaranteed to be safe when called from rent collection

pub fn index_scan_accounts<F>( &self, ancestors: &Ancestors, bank_id: BankId, index_key: IndexKey, scan_func: F, config: &ScanConfig ) -> ScanResult<bool>where F: FnMut(Option<(&Pubkey, AccountSharedData, Slot)>),

pub fn scan_account_storage<R, B>( &self, slot: Slot, cache_map_func: impl Fn(LoadedAccount<'_>) -> Option<R> + Sync, storage_scan_func: impl Fn(&B, LoadedAccount<'_>) + Sync ) -> ScanStorageResult<R, B>where R: Send, B: Send + Default + Sync,

Scan a specific slot through all the account storage

pub(crate) fn insert_default_bank_hash_stats( &self, slot: Slot, parent_slot: Slot )

Insert a default bank hash stats for slot

This fn is called when creating a new bank from parent.

pub fn load( &self, ancestors: &Ancestors, pubkey: &Pubkey, load_hint: LoadHint ) -> Option<(AccountSharedData, Slot)>

pub fn account_matches_owners( &self, ancestors: &Ancestors, account: &Pubkey, owners: &[&Pubkey] ) -> Result<usize, MatchAccountOwnerError>

Return Ok(index_of_matching_owner) if the account owner at offset is one of the pubkeys in owners. Return Err(MatchAccountOwnerError::NoMatch) if the account has 0 lamports or the owner is not one of the pubkeys in owners. Return Err(MatchAccountOwnerError::UnableToLoad) if the account could not be accessed.
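
A minimal sketch of those Result semantics, with &str standing in for Pubkey and the UnableToLoad case omitted (all names are hypothetical):

```rust
/// Ok(index into `owners`) on an owner match; an error for a zero-lamport
/// account or an unlisted owner (storage-access failures are not modeled).
#[derive(Debug, PartialEq)]
enum MatchErr {
    NoMatch,
}

fn matches_owners(lamports: u64, owner: &str, owners: &[&str]) -> Result<usize, MatchErr> {
    if lamports == 0 {
        return Err(MatchErr::NoMatch); // zero-lamport accounts never match
    }
    owners.iter().position(|candidate| *candidate == owner).ok_or(MatchErr::NoMatch)
}

fn main() {
    assert_eq!(matches_owners(1, "token", &["stake", "token"]), Ok(1));
    assert_eq!(matches_owners(0, "token", &["token"]), Err(MatchErr::NoMatch));
}
```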

pub fn load_account_into_read_cache( &self, ancestors: &Ancestors, pubkey: &Pubkey )

pub fn load_with_fixed_root( &self, ancestors: &Ancestors, pubkey: &Pubkey ) -> Option<(AccountSharedData, Slot)>

note this returns None for accounts with zero lamports

fn read_index_for_accessor_or_load_slow<'a>( &'a self, ancestors: &Ancestors, pubkey: &'a Pubkey, max_root: Option<Slot>, clone_in_lock: bool ) -> Option<(Slot, StorageLocation, Option<LoadedAccountAccessor<'a>>)>

fn retry_to_get_account_accessor<'a>( &'a self, slot: Slot, storage_location: StorageLocation, ancestors: &'a Ancestors, pubkey: &'a Pubkey, max_root: Option<Slot>, load_hint: LoadHint ) -> Option<(LoadedAccountAccessor<'a>, Slot)>

fn do_load( &self, ancestors: &Ancestors, pubkey: &Pubkey, max_root: Option<Slot>, load_hint: LoadHint, load_zero_lamports: LoadZeroLamports ) -> Option<(AccountSharedData, Slot)>

pub fn flush_read_only_cache_for_tests(&self)

Remove all entries from the read-only accounts cache. Useful for benches/tests.

fn do_load_with_populate_read_cache( &self, ancestors: &Ancestors, pubkey: &Pubkey, max_root: Option<Slot>, load_hint: LoadHint, load_into_read_cache_only: bool, load_zero_lamports: LoadZeroLamports ) -> Option<(AccountSharedData, Slot)>

if ‘load_into_read_cache_only’, then return value is meaningless. The goal is to get the account into the read-only cache.

pub fn load_account_hash( &self, ancestors: &Ancestors, pubkey: &Pubkey, max_root: Option<Slot>, load_hint: LoadHint ) -> Option<Hash>

fn get_account_accessor<'a>( &'a self, slot: Slot, pubkey: &'a Pubkey, storage_location: &StorageLocation ) -> LoadedAccountAccessor<'a>

fn try_recycle_and_insert_store( &self, slot: Slot, min_size: u64, max_size: u64 ) -> Option<Arc<AccountStorageEntry>>

fn try_recycle_store( &self, slot: Slot, min_size: u64, max_size: u64 ) -> Option<Arc<AccountStorageEntry>>

fn find_storage_candidate( &self, slot: Slot, size: usize ) -> Arc<AccountStorageEntry>

pub(crate) fn page_align(size: u64) -> u64

fn has_space_available(&self, slot: Slot, size: u64) -> bool

fn create_store( &self, slot: Slot, size: u64, from: &str, paths: &[PathBuf] ) -> Arc<AccountStorageEntry>

fn create_and_insert_store( &self, slot: Slot, size: u64, from: &str ) -> Arc<AccountStorageEntry>

fn create_and_insert_store_with_paths( &self, slot: Slot, size: u64, from: &str, paths: &[PathBuf] ) -> Arc<AccountStorageEntry>

fn insert_store(&self, slot: Slot, store: Arc<AccountStorageEntry>)

pub fn create_drop_bank_callback( &self, pruned_banks_sender: DroppedSlotsSender ) -> SendDroppedBankCallback

pub fn purge_slot( &self, slot: Slot, bank_id: BankId, is_serialized_with_abs: bool )

This should only be called after Bank::drop() runs in bank.rs. See the BANK_DROP_SAFETY comment below for more explanation.

  • is_serialized_with_abs - indicates whether this call runs sequentially with all other accounts_db relevant calls, such as shrinking, purging, etc., in the account background service.

fn recycle_slot_stores( &self, total_removed_storage_entries: usize, slot_stores: &[Arc<AccountStorageEntry>] ) -> u64

pub(crate) fn purge_slots_from_cache_and_store<'a>( &self, removed_slots: impl Iterator<Item = &'a Slot> + Clone, purge_stats: &PurgeStats, log_accounts: bool )

Purges every slot in removed_slots from both the cache and storage. This includes entries in the accounts index, cache entries, and any backing storage entries.

fn purge_dead_slots_from_storage<'a>( &'a self, removed_slots: impl Iterator<Item = &'a Slot> + Clone, purge_stats: &PurgeStats )

Purge the backing storage entries for the given slot; does not purge from the cache!

fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: SlotCache)

fn purge_slot_cache_pubkeys( &self, purged_slot: Slot, purged_slot_pubkeys: HashSet<(Slot, Pubkey)>, pubkey_to_slot_set: Vec<(Pubkey, Slot)>, is_dead: bool, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

fn purge_slot_storage(&self, remove_slot: Slot, purge_stats: &PurgeStats)

fn purge_slots<'a>(&self, slots: impl Iterator<Item = &'a Slot> + Clone)

pub fn remove_unrooted_slots(&self, remove_slots: &[(Slot, BankId)])

pub fn hash_account<T: ReadableAccount>( slot: Slot, account: &T, pubkey: &Pubkey, include_slot: IncludeSlotInHash ) -> Hash

fn hash_account_data( slot: Slot, lamports: u64, owner: &Pubkey, executable: bool, rent_epoch: Epoch, data: &[u8], pubkey: &Pubkey, include_slot: IncludeSlotInHash ) -> Hash

fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion

fn write_accounts_to_storage<'a, 'b, T: ReadableAccount + Sync, U: StorableAccounts<'a, T>, V: Borrow<Hash>>( &self, slot: Slot, storage: &AccountStorageEntry, accounts_and_meta_to_store: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V> ) -> Vec<AccountInfo>

pub fn mark_slot_frozen(&self, slot: Slot)

pub fn expire_old_recycle_stores(&self)

fn should_aggressively_flush_cache(&self) -> bool

true if write cache is too big

pub fn flush_accounts_cache( &self, force_flush: bool, requested_flush_root: Option<Slot> )

fn flush_rooted_accounts_cache( &self, requested_flush_root: Option<Slot>, should_clean: Option<(&mut usize, &mut usize)> ) -> (usize, usize)

fn do_flush_slot_cache( &self, slot: Slot, slot_cache: &SlotCache, should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>, max_clean_root: Option<Slot> ) -> FlushStats

fn flush_slot_cache(&self, slot: Slot) -> Option<FlushStats>

flush all accounts in this slot

pub(crate) fn combine_multiple_slots_into_one_at_startup( path: &Path, id: AppendVecId, slot: Slot, slot_stores: &HashMap<AppendVecId, Arc<AccountStorageEntry>> ) -> Arc<AccountStorageEntry>

1.13 and some 1.14 could produce legal snapshots with more than 1 append vec per slot. This is now illegal at runtime in the validator. However, there is a clear path to be able to support this. So, combine all accounts from ‘slot_stores’ into a new storage and return it. This runs prior to the storages being put in AccountsDb.storage

fn flush_slot_cache_with_clean( &self, slots: &[Slot], should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>, max_clean_root: Option<Slot> ) -> Option<FlushStats>

should_flush_f is an optional closure that determines whether a given account should be flushed. Passing None will by default flush all accounts

fn write_accounts_to_cache<'a, 'b, T: ReadableAccount + Sync, P>( &self, slot: Slot, accounts_and_meta_to_store: &impl StorableAccounts<'b, T>, txn_iter: Box<dyn Iterator<Item = &Option<&SanitizedTransaction>> + 'a>, include_slot_in_hash: IncludeSlotInHash, write_version_producer: P ) -> Vec<AccountInfo>where P: Iterator<Item = u64>,

fn store_accounts_to<'a: 'c, 'b, 'c, P: Iterator<Item = u64>, T: ReadableAccount + Sync + ZeroLamport + 'b>( &self, accounts: &'c impl StorableAccounts<'b, T>, hashes: Option<Vec<impl Borrow<Hash>>>, write_version_producer: P, store_to: &StoreTo<'_>, transactions: Option<&[Option<&'a SanitizedTransaction>]> ) -> Vec<AccountInfo>

fn report_store_stats(&self)

pub fn find_unskipped_slot( &self, slot: Slot, ancestors: Option<&Ancestors> ) -> Option<Slot>

find slot >= ‘slot’ which is a root or in ‘ancestors’

pub fn checked_iterative_sum_for_capitalization( total_cap: u64, new_cap: u64 ) -> u64

pub fn checked_sum_for_capitalization<T: Iterator<Item = u64>>( balances: T ) -> u64

pub fn calculate_accounts_hash_from_index( &self, max_slot: Slot, config: &CalcAccountsHashConfig<'_> ) -> Result<(AccountsHash, u64), AccountsHashVerificationError>

pub fn update_accounts_hash_for_tests( &self, slot: Slot, ancestors: &Ancestors, debug_verify: bool, is_startup: bool ) -> (AccountsHash, u64)

fn scan_single_account_storage<S>( storage: &Arc<AccountStorageEntry>, scanner: &mut S )where S: AppendVecScan,

iterate over a single storage, calling scanner on each item

fn update_old_slot_stats( &self, stats: &HashStats, storage: Option<&Arc<AccountStorageEntry>> )

fn apply_offset_to_slot(slot: Slot, offset: i64) -> Slot

return slot + offset, where offset can be +/-

fn get_one_epoch_old_slot_for_hash_calc_scan( &self, max_slot_inclusive: Slot, config: &CalcAccountsHashConfig<'_> ) -> Slot

If ancient append vecs are enabled, return the slot max_slot_inclusive - (slots_per_epoch - self.ancient_append_vec_offset); otherwise, return 0.

fn hash_storage_info( hasher: &mut impl StdHasher, storage: Option<&Arc<AccountStorageEntry>>, slot: Slot ) -> bool

Hash info about storage into hasher. Return true iff storage is valid for loading from cache.

fn scan_account_storage_no_bank<S>( &self, cache_hash_data: &CacheHashData, config: &CalcAccountsHashConfig<'_>, snapshot_storages: &SortedStorages<'_>, scanner: S, bin_range: &Range<usize>, stats: &mut HashStats ) -> Vec<CacheHashDataFile>where S: AppendVecScan,

Scan through all the account storage in parallel. Returns a Vec of open/mmapped files. Each file has serialized hash info, sorted by pubkey and then slot, from scanning the append vecs. A single pubkey could be in multiple entries. The pubkey found in the latest entry is the one to use.
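
A minimal sketch of the "latest entry wins" rule over entries already sorted by (pubkey, slot), with u32 standing in for Pubkey:

```rust
/// Keep only the last (newest-slot) entry per pubkey from a list already
/// sorted by (pubkey, slot).
fn dedup_latest(sorted: &[(u32, u64)]) -> Vec<(u32, u64)> {
    let mut out: Vec<(u32, u64)> = Vec::new();
    for &(pubkey, slot) in sorted {
        match out.last_mut() {
            Some(last) if last.0 == pubkey => last.1 = slot, // newer slot wins
            _ => out.push((pubkey, slot)),
        }
    }
    out
}

fn main() {
    // pubkey 7 appears at slots 3 and 9; only slot 9 survives
    assert_eq!(dedup_latest(&[(5, 1), (7, 3), (7, 9)]), vec![(5, 1), (7, 9)]);
}
```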

fn mark_old_slots_as_dirty( &self, storages: &SortedStorages<'_>, slots_per_epoch: Slot, stats: &mut HashStats )

storages are sorted by slot and have range info. Add all stores older than slots_per_epoch to dirty_stores so clean visits these slots.

pub(crate) fn calculate_accounts_hash( &self, data_source: CalcAccountsHashDataSource, slot: Slot, config: &CalcAccountsHashConfig<'_> ) -> Result<(AccountsHash, u64), AccountsHashVerificationError>

fn calculate_accounts_hash_with_verify( &self, data_source: CalcAccountsHashDataSource, debug_verify: bool, slot: Slot, config: CalcAccountsHashConfig<'_>, expected_capitalization: Option<u64> ) -> Result<(AccountsHash, u64), AccountsHashVerificationError>

pub fn update_accounts_hash( &self, data_source: CalcAccountsHashDataSource, debug_verify: bool, slot: Slot, ancestors: &Ancestors, expected_capitalization: Option<u64>, epoch_schedule: &EpochSchedule, rent_collector: &RentCollector, is_startup: bool ) -> (AccountsHash, u64)

pub fn update_incremental_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, storages: &SortedStorages<'_>, slot: Slot, stats: HashStats ) -> Result<(IncrementalAccountsHash, u64), AccountsHashVerificationError>

Calculate the incremental accounts hash for storages and save the results at slot

pub fn set_accounts_hash( &self, slot: Slot, accounts_hash: (AccountsHash, u64) ) -> Option<(AccountsHash, u64)>

Set the accounts hash for slot

returns the previous accounts hash for slot

pub fn set_accounts_hash_from_snapshot( &mut self, slot: Slot, accounts_hash: SerdeAccountsHash, capitalization: u64 ) -> Option<(AccountsHash, u64)>

After deserializing a snapshot, set the accounts hash for the new AccountsDb

pub fn get_accounts_hash(&self, slot: Slot) -> Option<(AccountsHash, u64)>

Get the accounts hash for slot

pub fn set_incremental_accounts_hash( &self, slot: Slot, incremental_accounts_hash: (IncrementalAccountsHash, u64) ) -> Option<(IncrementalAccountsHash, u64)>

Set the incremental accounts hash for slot

returns the previous incremental accounts hash for slot

pub fn set_incremental_accounts_hash_from_snapshot( &mut self, slot: Slot, incremental_accounts_hash: SerdeIncrementalAccountsHash, capitalization: u64 ) -> Option<(IncrementalAccountsHash, u64)>

After deserializing a snapshot, set the incremental accounts hash for the new AccountsDb

pub fn get_incremental_accounts_hash( &self, slot: Slot ) -> Option<(IncrementalAccountsHash, u64)>

Get the incremental accounts hash for slot

pub fn purge_old_accounts_hashes(&self, last_full_snapshot_slot: Slot)

Purge accounts hashes that are older than last_full_snapshot_slot

Should only be called by AccountsHashVerifier, since it consumes the accounts hashes and knows which ones are still needed.

fn scan_snapshot_stores_with_cache( &self, cache_hash_data: &CacheHashData, storages: &SortedStorages<'_>, stats: &mut HashStats, bins: usize, bin_range: &Range<usize>, config: &CalcAccountsHashConfig<'_>, filler_account_suffix: Option<&Pubkey> ) -> Result<Vec<CacheHashDataFile>, AccountsHashVerificationError>

scan ‘storages’, return a vec of ‘CacheHashDataFile’, one per pass

fn sort_slot_storage_scan(accum: BinnedHashData) -> (BinnedHashData, u64)

fn assert_safe_squashing_accounts_hash( &self, slot: Slot, epoch_schedule: &EpochSchedule )

if we ever try to calc hash where there are squashed append vecs within the last epoch, we will fail

fn get_cache_hash_data( accounts_hash_cache_path: PathBuf, config: &CalcAccountsHashConfig<'_>, slot: Slot ) -> CacheHashData

The normal code path returns the common cache path. When called after a failure has been detected, redirect the cache storage to a separate folder for later debugging.

pub fn calculate_accounts_hash_from_storages( &self, config: &CalcAccountsHashConfig<'_>, storages: &SortedStorages<'_>, stats: HashStats ) -> Result<(AccountsHash, u64), AccountsHashVerificationError>

pub fn calculate_incremental_accounts_hash( &self, config: &CalcAccountsHashConfig<'_>, storages: &SortedStorages<'_>, stats: HashStats ) -> Result<(IncrementalAccountsHash, u64), AccountsHashVerificationError>

Calculate the incremental accounts hash

This calculation is intended to be used by incremental snapshots, and thus differs from a “full” accounts hash in a few ways:

  • Zero-lamport accounts are included in the hash because zero-lamport accounts are also included in the incremental snapshot. This ensures reconstructing the AccountsDb is still correct when using this incremental accounts hash.
  • storages must be the same as the ones going into the incremental snapshot.

fn _calculate_accounts_hash_from_storages( &self, config: &CalcAccountsHashConfig<'_>, storages: &SortedStorages<'_>, stats: HashStats, flavor: CalcAccountsHashFlavor, accounts_hash_cache_path: PathBuf ) -> Result<(AccountsHashEnum, u64), AccountsHashVerificationError>

fn calc_alive_ancient_historical_roots(&self, min_root: Slot) -> HashSet<Slot>

return alive roots to retain, even though they are ancient

fn remove_old_historical_roots(&self, min_root: Slot)

get rid of historical roots that are older than ‘min_root’. These will be older than an epoch from a current root.

pub fn verify_accounts_hash_and_lamports( &self, slot: Slot, total_lamports: u64, base: Option<(Slot, u64)>, config: VerifyAccountsHashAndLamportsConfig<'_> ) -> Result<(), AccountsHashVerificationError>

Verify accounts hash at startup (or tests)

Calculate accounts hash(es) and compare them to the values set at startup. If base is None, only calculates the full accounts hash for [0, slot]. If base is Some, calculate the full accounts hash for [0, base slot] and then calculate the incremental accounts hash for (base slot, slot].
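
A minimal sketch of which slot ranges the two calculations cover depending on base (the name and the tuple return shape are assumptions for illustration):

```rust
use std::ops::RangeInclusive;

/// Which ranges get a full vs. an incremental hash, per the doc above.
fn verification_ranges(slot: u64, base: Option<u64>) -> (RangeInclusive<u64>, Option<(u64, u64)>) {
    match base {
        // no base: one full calculation over [0, slot]
        None => (0..=slot, None),
        // full hash over [0, base]; incremental hash over (base, slot]
        Some(base_slot) => (0..=base_slot, Some((base_slot + 1, slot))),
    }
}

fn main() {
    assert_eq!(verification_ranges(500, None), (0..=500, None));
    assert_eq!(verification_ranges(500, Some(300)), (0..=300, Some((301, 500))));
}
```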

pub(crate) fn get_pubkey_hash_for_slot( &self, slot: Slot ) -> (Vec<(Pubkey, Hash)>, u64, Measure)

helper to return

  1. pubkey, hash pairs for the slot
  2. microseconds (us) spent scanning
  3. Measure started when we began accumulating

pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash

Calculate accounts delta hash for slot

As part of calculating the accounts delta hash, get a list of accounts modified this slot (aka dirty pubkeys) and add them to self.uncleaned_pubkeys for future cleaning.

fn set_accounts_delta_hash( &self, slot: Slot, accounts_delta_hash: AccountsDeltaHash ) -> Option<AccountsDeltaHash>

Set the accounts delta hash for slot in the accounts_delta_hashes map

returns the previous accounts delta hash for slot

pub fn set_accounts_delta_hash_from_snapshot( &mut self, slot: Slot, accounts_delta_hash: SerdeAccountsDeltaHash ) -> Option<AccountsDeltaHash>

After deserializing a snapshot, set the accounts delta hash for the new AccountsDb

pub fn get_accounts_delta_hash(&self, slot: Slot) -> Option<AccountsDeltaHash>

Get the accounts delta hash for slot in the accounts_delta_hashes map

pub fn update_bank_hash_stats_from_snapshot( &mut self, slot: Slot, stats: BankHashStats ) -> Option<BankHashStats>

When reconstructing AccountsDb from a snapshot, insert the bank_hash_stats into the internal bank hash stats map.

This fn is only called when loading from a snapshot, which means AccountsDb is new and its bank hash stats map is unpopulated. Except for slot 0.

Slot 0 is a special case. When a new AccountsDb is created, like when loading from a snapshot, the bank hash stats map is populated with a default entry at slot 0. Remove the default entry at slot 0, and then insert the new value at slot.
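
A minimal sketch of that slot-0 special case, with String standing in for BankHashStats (the function name is hypothetical):

```rust
use std::collections::HashMap;

/// Drop the default entry a fresh stats map carries at slot 0, then record
/// the snapshot's stats at `slot`, returning any previous value.
fn update_stats_from_snapshot(
    stats_map: &mut HashMap<u64, String>,
    slot: u64,
    stats: String,
) -> Option<String> {
    stats_map.remove(&0); // default entry inserted when the AccountsDb was created
    stats_map.insert(slot, stats)
}

fn main() {
    let mut map = HashMap::from([(0, "default".to_string())]);
    assert_eq!(update_stats_from_snapshot(&mut map, 250, "restored".into()), None);
    assert!(!map.contains_key(&0) && map.contains_key(&250));
}
```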

pub fn get_bank_hash_stats(&self, slot: Slot) -> Option<BankHashStats>

Get the bank hash stats for slot in the bank_hash_stats map

fn remove_bank_hash_info(&self, slot: &Slot)

Remove “bank hash info” for slot

This fn removes the accounts delta hash, accounts hash, and bank hash stats for slot from their respective maps.

fn remove_bank_hash_infos<'s>(&self, slots: impl IntoIterator<Item = &'s Slot>)

Remove “bank hash info” for slots

This fn removes the accounts delta hash and bank hash stats for slots from their respective maps.

fn update_index<'a, T: ReadableAccount + Sync>( &self, infos: Vec<AccountInfo>, accounts: &impl StorableAccounts<'a, T>, reclaim: UpsertReclaim ) -> SlotList<AccountInfo>

fn should_not_shrink(aligned_bytes: u64, total_bytes: u64) -> bool

fn is_shrinking_productive(slot: Slot, store: &Arc<AccountStorageEntry>) -> bool

fn is_candidate_for_shrink( &self, store: &Arc<AccountStorageEntry>, allow_shrink_ancient: bool ) -> bool

fn remove_dead_accounts<'a, I>( &'a self, reclaims: I, expected_slot: Option<Slot>, reclaimed_offsets: Option<&mut HashMap<Slot, HashSet<usize>>>, reset_accounts: bool ) -> HashSet<Slot>where I: Iterator<Item = &'a (Slot, AccountInfo)>,

fn remove_dead_slots_metadata<'a>( &'a self, dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone, purged_slot_pubkeys: HashSet<(Slot, Pubkey)>, purged_stored_account_slots: Option<&mut HashMap<Pubkey, HashSet<Slot>>>, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.

fn unref_pubkeys<'a>( &'a self, pubkeys: impl Iterator<Item = &'a Pubkey> + Clone + Send + Sync, num_pubkeys: usize, pubkeys_removed_from_accounts_index: &'a HashSet<Pubkey> )

Look up each pubkey in pubkeys and unref it in the accounts index. Skip pubkeys that are in pubkeys_removed_from_accounts_index.

fn unref_accounts( &self, purged_slot_pubkeys: HashSet<(Slot, Pubkey)>, purged_stored_account_slots: &mut HashMap<Pubkey, HashSet<Slot>>, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

Look up each pubkey in purged_slot_pubkeys and unref it in the accounts index. Populate purged_stored_account_slots by grouping purged_slot_pubkeys by pubkey. pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.
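
A minimal sketch of the grouping plus the unref skip, with u32 standing in for Pubkey and a println in place of the accounts-index unref (all names here are hypothetical):

```rust
use std::collections::{HashMap, HashSet};

/// Group purged (slot, pubkey) pairs by pubkey, then unref each pubkey
/// unless it was already removed from the accounts index.
fn unref_accounts_sketch(
    purged_slot_pubkeys: HashSet<(u64, u32)>,
    removed_from_index: &HashSet<u32>,
) -> HashMap<u32, HashSet<u64>> {
    let mut grouped: HashMap<u32, HashSet<u64>> = HashMap::new();
    for (slot, pubkey) in purged_slot_pubkeys {
        grouped.entry(pubkey).or_default().insert(slot);
    }
    for pubkey in grouped.keys() {
        if !removed_from_index.contains(pubkey) {
            println!("unref {pubkey}"); // stand-in for the accounts-index unref
        }
    }
    grouped
}

fn main() {
    let purged = [(10u64, 1u32), (20, 1), (10, 2)].into_iter().collect();
    let removed = [2u32].into_iter().collect();
    let grouped = unref_accounts_sketch(purged, &removed);
    assert_eq!(grouped[&1].len(), 2); // pubkey 1 was purged in two slots
}
```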

fn clean_dead_slots_from_accounts_index<'a>( &'a self, dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone, purged_slot_pubkeys: HashSet<(Slot, Pubkey)>, purged_stored_account_slots: Option<&mut HashMap<Pubkey, HashSet<Slot>>>, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.

fn clean_stored_dead_slots( &self, dead_slots: &HashSet<Slot>, purged_account_slots: Option<&mut HashMap<Pubkey, HashSet<Slot>>>, pubkeys_removed_from_accounts_index: &HashSet<Pubkey> )

pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index and should not be unref’d. If they exist in the accounts index, they are NEW.

pub fn store_cached<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, transactions: Option<&'a [Option<&'a SanitizedTransaction>]> )

pub fn store_uncached( &self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)] )

Store the account update. Only called by tests.

fn store<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, store_to: &StoreTo<'_>, transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, reclaim: StoreReclaims )

fn report_store_timings(&self)

fn store_accounts_unfrozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, hashes: Option<Vec<impl Borrow<Hash>>>, store_to: &StoreTo<'_>, transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, reclaim: StoreReclaims )

pub(crate) fn store_accounts_frozen<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, hashes: Option<Vec<impl Borrow<Hash>>>, storage: &Arc<AccountStorageEntry>, write_version_producer: Option<Box<dyn Iterator<Item = StoredMetaWriteVersion>>>, reclaim: StoreReclaims ) -> StoreAccountsTiming

fn store_accounts_custom<'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &self, accounts: impl StorableAccounts<'a, T>, hashes: Option<Vec<impl Borrow<Hash>>>, write_version_producer: Option<Box<dyn Iterator<Item = u64>>>, store_to: &StoreTo<'_>, reset_accounts: bool, transactions: Option<&[Option<&SanitizedTransaction>]>, reclaim: StoreReclaims ) -> StoreAccountsTiming

pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming

pub fn get_snapshot_storages( &self, requested_slots: impl RangeBounds<Slot> + Sync ) -> (Vec<Arc<AccountStorageEntry>>, Vec<Slot>)

Get storages to use for snapshots, for the requested slots

fn process_storage_slot<'a>( &self, storage: &'a Arc<AccountStorageEntry> ) -> HashMap<Pubkey, IndexAccountMapEntry<'a>>

fn stats_for_rent_payers<T: ReadableAccount>( pubkey: &Pubkey, account: &T, rent_collector: &RentCollector ) -> Option<u64>

return Some(lamports_to_top_off) if ‘account’ would collect rent

fn generate_index_for_slot( &self, accounts_map: HashMap<Pubkey, IndexAccountMapEntry<'_>>, slot: &Slot, rent_collector: &RentCollector ) -> SlotIndexGenerationInfo

fn filler_unique_id_bytes() -> usize

fn filler_rent_partition_prefix_bytes() -> usize

fn filler_prefix_bytes() -> usize

pub fn is_filler_account_helper( pubkey: &Pubkey, filler_account_suffix: Option<&Pubkey> ) -> bool

pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool

true if ‘pubkey’ is a filler account

pub fn filler_accounts_enabled(&self) -> bool

true if it is possible that there are filler accounts present

fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, Hash)

return ‘AccountSharedData’ and a hash for a filler account

fn get_filler_account_pubkeys(&self, count: usize) -> Vec<Pubkey>

fn get_filler_account_pubkey(&self, subrange: &Pubkey) -> Pubkey

pub fn maybe_add_filler_accounts( &self, epoch_schedule: &EpochSchedule, slot: Slot )

filler accounts are space-holding accounts which are ignored by hash calculations and rent. They are designed to allow a validator to run against a network successfully while simulating having many more accounts present. All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup. The filler accounts are added to each slot in the snapshot after index generation. The accounts added in a slot are set up to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old. Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully.

pub fn generate_index( &self, limit_load_slot_count_from_snapshot: Option<usize>, verify: bool, genesis_config: &GenesisConfig ) -> IndexGenerationInfo

fn maybe_throttle_index_generation(&self)

Startup processes can consume large amounts of memory while inserting accounts into the index as fast as possible. Calling this can slow down the insertion process to allow flushing to disk to keep pace.

fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], rent_collector: &RentCollector, timings: &GenerateIndexTimings ) -> (u64, HashSet<Slot>)

Used during generate_index() to:

  1. get the duplicate accounts data len from the given pubkeys
  2. get the slots that contained duplicate pubkeys
  3. update rent stats

Note this should only be used when ALL entries in the accounts index are roots. Returns (data len sum of all older duplicates, slots that contained duplicate pubkeys).

fn update_storage_info( storage_info: &DashMap<AppendVecId, StorageSizeAndCount>, accounts_map: &HashMap<Pubkey, IndexAccountMapEntry<'_>>, timings: &Mutex<GenerateIndexTimings> )

fn set_storage_count_and_alive_bytes( &self, stored_sizes_and_counts: DashMap<AppendVecId, StorageSizeAndCount>, timings: &mut GenerateIndexTimings )

pub(crate) fn print_accounts_stats(&self, label: &str)

fn print_index(&self, label: &str)

fn print_count_and_status(&self, label: &str)

impl AccountsDb

pub(crate) fn combine_ancient_slots_packed( &self, sorted_slots: Vec<Slot>, can_randomly_shrink: bool )

Combine account data from storages in sorted_slots into packed storages. This keeps us from accumulating storages for each slot older than an epoch. After this function, the number of alive roots is <= the number of alive roots when it was called. In practice, the number of alive roots afterwards will be significantly less than when it was called. The goal is to reduce the number of roots and storages (one per root) required to store all the data in ancient slots.

fn combine_ancient_slots_packed_internal( &self, sorted_slots: Vec<Slot>, tuning: PackedAncientStorageTuning, metrics: &mut ShrinkStatsSub )

fn addref_accounts_failed_to_shrink_ancient( &self, accounts_to_combine: AccountsToCombine<'_> )

for each account in unrefed_pubkeys, in each accounts_to_combine, addref

fn collect_sort_filter_ancient_slots( &self, slots: Vec<Slot>, tuning: &PackedAncientStorageTuning ) -> AncientSlotInfos

Calculate all storage info for the storages in slots. Then, apply tuning to filter out slots we do NOT want to combine.

fn write_ancient_accounts<'a, 'b: 'a, T: ReadableAccount + Sync + ZeroLamport + 'a>( &'b self, bytes: u64, accounts_to_write: impl StorableAccounts<'a, T>, write_ancient_accounts: &mut WriteAncientAccounts<'b> )

Create an append vec of size bytes, write accounts_to_write into it, and return shrink_in_progress and some metrics.

fn calc_ancient_slot_info( &self, slots: Vec<Slot>, can_randomly_shrink: bool ) -> AncientSlotInfos

Go through all slots and populate SlotInfo per slot. This provides the list of possible ancient slots to sort, filter, and then combine.

fn write_packed_storages<'a, 'b>( &'a self, accounts_to_combine: &'b AccountsToCombine<'b>, packed_contents: Vec<PackedAncientStorage<'b>> ) -> WriteAncientAccounts<'a>

write packed storages as described in ‘accounts_to_combine’ and ‘packed_contents’

fn get_unique_accounts_from_storage_for_combining_ancient_slots<'a>( &self, ancient_slots: &'a [SlotInfo] ) -> Vec<(&'a SlotInfo, GetUniqueAccountsResult<'a>)>

For each slot in ancient_slots, collect all accounts in that slot. Return the collection of accounts by slot.

fn finish_combine_ancient_slots_packed_internal( &self, accounts_to_combine: AccountsToCombine<'_>, write_ancient_accounts: WriteAncientAccounts<'_>, metrics: &mut ShrinkStatsSub )

Finish the shrink operation on slots where a new storage was created. Drop the root and storage for all original slots whose contents were combined into other storages.

fn calc_accounts_to_combine<'a>( &self, accounts_per_storage: &'a Vec<(&'a SlotInfo, GetUniqueAccountsResult<'a>)> ) -> AccountsToCombine<'a>

given all accounts per ancient slot, in slots that we want to combine together:

  1. Look up each pubkey in the index
  2. separate, by slot, into: 2a. pubkeys with refcount = 1 (this pubkey exists NOWHERE else in the accounts db); 2b. pubkeys with refcount > 1

Note that the return value can contain fewer items than accounts_per_storage if we find storages which won’t be affected. accounts_per_storage should be sorted by slot.

fn get_many_refs_pubkeys<'a>( shrink_collect: &ShrinkCollect<'a, ShrinkCollectAliveSeparatedByRefs<'a>> ) -> Vec<Pubkey>

return pubkeys from many_refs accounts

fn revisit_accounts_with_many_refs<'a>( &self, shrink_collect: &mut ShrinkCollect<'a, ShrinkCollectAliveSeparatedByRefs<'a>> )

After calling shrink_collect() on many slots, any dead accounts in those slots would be unref’d. Alive accounts which had ref_count > 1 are stored in shrink_collect.alive_accounts.many_refs. Since many slots were being visited, it is possible that at a point in time, an account was found to be alive and have ref_count > 1. Concurrently, another slot was visited which also had the account, but the account was dead and unref’d in that shrink_collect() call. So, now that all unrefs have occurred, go back through the small number of many_refs accounts and for all that now only have 1 ref_count, move the account from many_refs to one_ref.
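
A minimal sketch of that final migration step, with u32 standing in for Pubkey and a plain map of ref counts in place of the accounts index (names are hypothetical):

```rust
use std::collections::HashMap;

/// After all cross-slot unrefs have landed, move accounts whose ref count
/// dropped to 1 out of `many_refs` so they can be packed like one-ref accounts.
fn revisit_many_refs(
    ref_counts: &HashMap<u32, u64>,
    many_refs: &mut Vec<u32>,
    one_ref: &mut Vec<u32>,
) {
    let mut i = 0;
    while i < many_refs.len() {
        if ref_counts.get(&many_refs[i]).copied().unwrap_or(0) == 1 {
            one_ref.push(many_refs.swap_remove(i)); // now uniquely referenced
        } else {
            i += 1;
        }
    }
}

fn main() {
    let ref_counts = HashMap::from([(1u32, 1u64), (2, 3)]);
    let (mut many, mut one) = (vec![1u32, 2], Vec::new());
    revisit_many_refs(&ref_counts, &mut many, &mut one);
    assert_eq!((many, one), (vec![2], vec![1]));
}
```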

fn write_one_packed_storage<'a, 'b: 'a>( &'b self, packed: &'a PackedAncientStorage<'a>, target_slot: Slot, write_ancient_accounts: &mut WriteAncientAccounts<'b> )

Create packed storage and write the contents of packed to it. Accumulate results in write_ancient_accounts.

fn write_ancient_accounts_to_same_slot_multiple_refs<'a, 'b: 'a>( &'b self, accounts_to_combine: impl Iterator<Item = &'a AliveAccounts<'a>>, write_ancient_accounts: &mut WriteAncientAccounts<'b> )

For each slot and alive accounts in ‘accounts_to_combine’, create a PackedAncientStorage that contains only the given alive accounts. This will represent only the accounts with ref_count > 1 from the original storage. These accounts need to be rewritten in their same slot, ideally with no other accounts in the slot. Other accounts would have ref_count = 1. ref_count = 1 accounts will be combined together with other slots into larger append vecs elsewhere.

Trait Implementations§

impl Debug for AccountsDb

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.
