pub struct InMemAccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> {
last_age_flushed: AtomicU8,
map_internal: RwLock<HashMap<Pubkey, Arc<AccountMapEntryInner<T>>>>,
storage: Arc<BucketMapHolder<T, U>>,
bin: usize,
bucket: Option<Arc<BucketApi<(Slot, U)>>>,
pub(crate) cache_ranges_held: RwLock<Vec<RangeInclusive<Pubkey>>>,
stop_evictions_changes: AtomicU64,
stop_evictions: AtomicU64,
flushing_active: AtomicBool,
startup_info: StartupInfo<T>,
possible_evictions: RwLock<PossibleEvictions<T>>,
remaining_ages_to_skip_flushing: AtomicU8,
num_ages_to_distribute_flushes: Age,
}
Fields
last_age_flushed: AtomicU8
map_internal: RwLock<HashMap<Pubkey, Arc<AccountMapEntryInner<T>>>>
storage: Arc<BucketMapHolder<T, U>>
bin: usize
bucket: Option<Arc<BucketApi<(Slot, U)>>>
cache_ranges_held: RwLock<Vec<RangeInclusive<Pubkey>>>
stop_evictions_changes: AtomicU64
stop_evictions: AtomicU64
flushing_active: AtomicBool
startup_info: StartupInfo<T>
Info to streamline initial index generation.
possible_evictions: RwLock<PossibleEvictions<T>>
Possible evictions for the next few slots coming up.
remaining_ages_to_skip_flushing: AtomicU8
How many more ages to skip before this bucket is flushed (as opposed to being skipped). When this reaches 0, this bucket is flushed.
num_ages_to_distribute_flushes: Age
An individual bucket will evict its entries and write to disk once every NUM_AGES_TO_DISTRIBUTE_FLUSHES ages. Higher numbers mean we flush fewer buckets per second; lower numbers mean we flush more buckets per second. A minimal sketch of this countdown follows below.
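As an illustration only, here is a standalone sketch of the countdown these two fields describe, using plain integers instead of the index's atomics; the exact reset behavior is an assumption, not the actual implementation:

```rust
// Simplified sketch of the flush-skipping countdown described above.
// Plain (non-atomic) fields; placeholder for the real per-bucket state.
struct BucketFlushSchedule {
    remaining_ages_to_skip_flushing: u8,
    num_ages_to_distribute_flushes: u8,
}

impl BucketFlushSchedule {
    /// Called once per age. Returns true if this bucket should flush now.
    fn should_flush_this_age(&mut self) -> bool {
        if self.remaining_ages_to_skip_flushing == 0 {
            // Time to flush; start a new countdown (assumption: reset to N-1
            // so the bucket flushes once every N ages).
            self.remaining_ages_to_skip_flushing =
                self.num_ages_to_distribute_flushes.saturating_sub(1);
            true
        } else {
            self.remaining_ages_to_skip_flushing -= 1;
            false
        }
    }
}

fn main() {
    let mut sched = BucketFlushSchedule {
        remaining_ages_to_skip_flushing: 3,
        num_ages_to_distribute_flushes: 4,
    };
    // Flushes once every num_ages_to_distribute_flushes ages.
    let flushes: Vec<bool> = (0..8).map(|_| sched.should_flush_this_age()).collect();
    assert_eq!(flushes, vec![false, false, false, true, false, false, false, true]);
}
```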
Implementations
impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T, U>
pub fn new(storage: &Arc<BucketMapHolder<T, U>>, bin: usize) -> Self
fn get_should_age(&self, age: Age) -> bool
True if this bucket needs to call flush for the current age. We need to scan each bucket once per value of age.
fn set_has_aged(&self, age: Age, can_advance_age: bool)
Called after flush scans this bucket at the current age.
fn last_age_flushed(&self) -> Age
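A simplified, non-atomic sketch of how get_should_age, set_has_aged, and last_age_flushed fit together, under the assumption that a bucket simply records the last age it flushed at (not the actual implementation):

```rust
// Sketch: a bucket should be scanned/flushed once per value of `age`.
type Age = u8;

struct Bucket {
    last_age_flushed: Age,
}

impl Bucket {
    /// True if flush has not yet run for `age`.
    fn get_should_age(&self, age: Age) -> bool {
        self.last_age_flushed != age
    }

    /// Called after flush scans this bucket at `age`.
    fn set_has_aged(&mut self, age: Age) {
        self.last_age_flushed = age;
    }
}

fn main() {
    let mut b = Bucket { last_age_flushed: 0 };
    assert!(b.get_should_age(1));
    b.set_has_aged(1);
    assert!(!b.get_should_age(1));
}
```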
pub fn shrink_to_fit(&self)
Release the entire in-mem hashmap to free all memory associated with it. The idea is that during startup we need a larger map than we do during runtime. When using disk buckets, the in-mem index grows over time with dynamic use and then shrinks, in theory back to 0.
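The same pattern can be shown with plain std types; a hypothetical sketch of releasing a map's excess capacity once a startup-sized working set is gone:

```rust
use std::{collections::HashMap, sync::RwLock};

fn main() {
    // During "startup" the map grows large...
    let map: RwLock<HashMap<u64, u64>> = RwLock::new(HashMap::with_capacity(100_000));
    let mut m = map.write().unwrap();
    for i in 0..100_000u64 {
        m.insert(i, i);
    }
    // ...later, entries age out (here: all of them)...
    m.clear();
    // ...and shrink_to_fit releases the now-unused capacity.
    m.shrink_to_fit();
    assert!(m.capacity() < 100_000);
}
```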
pub fn items<R>(&self, range: &R) -> Vec<(Pubkey, Arc<AccountMapEntryInner<T>>)> where R: RangeBounds<Pubkey> + Debug,
pub fn keys(&self) -> Vec<Pubkey>
fn load_from_disk(&self, pubkey: &Pubkey) -> Option<(SlotList<U>, RefCount)>
fn load_account_entry_from_disk( &self, pubkey: &Pubkey ) -> Option<Arc<AccountMapEntryInner<T>>>
fn get_only_in_mem<RT>( &self, pubkey: &Pubkey, update_age: bool, callback: impl for<'a> FnOnce(Option<&'a Arc<AccountMapEntryInner<T>>>) -> RT ) -> RT
Look up ‘pubkey’ by only looking in memory; does not look on disk. ‘callback’ is called whether pubkey is found or not.
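A minimal sketch of this in-memory-only, callback-style lookup, with u64 keys and a placeholder entry type standing in for Pubkey and AccountMapEntryInner<T>:

```rust
use std::{collections::HashMap, sync::{Arc, RwLock}};

struct Entry(u64); // stand-in for AccountMapEntryInner<T>

struct InMemIndex {
    map_internal: RwLock<HashMap<u64, Arc<Entry>>>, // u64 key stands in for Pubkey
}

impl InMemIndex {
    /// Look only in memory; the callback runs whether or not the key is found.
    fn get_only_in_mem<RT>(
        &self,
        key: &u64,
        callback: impl FnOnce(Option<&Arc<Entry>>) -> RT,
    ) -> RT {
        let map = self.map_internal.read().unwrap();
        callback(map.get(key))
    }
}

fn main() {
    let index = InMemIndex { map_internal: RwLock::new(HashMap::new()) };
    index.map_internal.write().unwrap().insert(7, Arc::new(Entry(42)));
    let found = index.get_only_in_mem(&7, |e| e.map(|e| e.0));
    assert_eq!(found, Some(42));
    let missing = index.get_only_in_mem(&8, |e| e.map(|e| e.0));
    assert_eq!(missing, None);
}
```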
pub fn get(&self, pubkey: &Pubkey) -> Option<Arc<AccountMapEntryInner<T>>>
Look up ‘pubkey’ in the index (in mem or on disk).
fn set_age_to_future( &self, entry: &Arc<AccountMapEntryInner<T>>, is_cached: bool )
Set the age of ‘entry’ to the future. If ‘is_cached’, the age will be set farther out.
pub(crate) fn get_internal<RT>( &self, pubkey: &Pubkey, callback: impl for<'a> FnOnce(Option<&Arc<AccountMapEntryInner<T>>>) -> (bool, RT) ) -> RT
Look up ‘pubkey’ in the index (in-mem or disk). Call ‘callback’ whether found or not.
fn remove_if_slot_list_empty_value(&self, is_empty: bool) -> bool
fn delete_disk_key(&self, pubkey: &Pubkey)
fn remove_if_slot_list_empty_entry( &self, entry: Entry<'_, Pubkey, Arc<AccountMapEntryInner<T>>> ) -> bool
Return false if the entry is in the index (disk or memory) and has a slot list len > 0. Return true in all other cases, including if the entry is NOT in the index at all.
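The rule above, expressed as a small standalone predicate over a simplified entry type (illustrative only):

```rust
// Sketch of the predicate described above, with a simplified entry type.
struct Entry {
    slot_list: Vec<(u64, u64)>, // (slot, account info) pairs
}

/// Returns true if the entry (if any) is empty and can be removed from the index.
fn remove_if_slot_list_empty(entry: Option<&Entry>) -> bool {
    match entry {
        // Present with a non-empty slot list: keep it.
        Some(e) if !e.slot_list.is_empty() => false,
        // Present but empty, or not in the index at all: removable.
        _ => true,
    }
}

fn main() {
    assert!(remove_if_slot_list_empty(None));
    assert!(remove_if_slot_list_empty(Some(&Entry { slot_list: vec![] })));
    assert!(!remove_if_slot_list_empty(Some(&Entry { slot_list: vec![(1, 2)] })));
}
```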
pub fn remove_if_slot_list_empty(&self, pubkey: Pubkey) -> bool
pub fn slot_list_mut<RT>( &self, pubkey: &Pubkey, user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT ) -> Option<RT>
fn update_slot_list_entry( &self, entry: &Arc<AccountMapEntryInner<T>>, new_value: PreAllocatedAccountMapEntry<T>, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim )
Update ‘entry’ with ‘new_value’.
pub fn upsert( &self, pubkey: &Pubkey, new_value: PreAllocatedAccountMapEntry<T>, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim )
fn update_entry_stats(&self, stopped_measure: Measure, found: bool)
pub(crate) fn lock_and_update_slot_list( current: &AccountMapEntryInner<T>, new_value: (Slot, T), other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim ) -> usize
Try to update an item in the slot list for the given slot. If an item for that slot already exists in the list, remove the older item, add it to ‘reclaims’, and insert the new item. If ‘other_slot’ is Some, also remove any entries in the slot list that are at ‘other_slot’. Return the resulting length of the slot list.
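A simplified sketch of the replace-and-reclaim behavior described above, with u64 account info, no locking, and no ref counting (the UpsertReclaim policy is ignored):

```rust
// Sketch of the slot-list update described above (simplified: T = u64).
type Slot = u64;
type SlotList = Vec<(Slot, u64)>;

/// Replace any entry at `slot` (and, if given, at `other_slot`) with `new_value`,
/// pushing the replaced entries onto `reclaims`. Returns the resulting list length.
fn update_slot_list(
    slot_list: &mut SlotList,
    new_value: (Slot, u64),
    other_slot: Option<Slot>,
    reclaims: &mut SlotList,
) -> usize {
    slot_list.retain(|&(slot, info)| {
        if slot == new_value.0 || Some(slot) == other_slot {
            reclaims.push((slot, info)); // the older item is reclaimed
            false
        } else {
            true
        }
    });
    slot_list.push(new_value);
    slot_list.len()
}

fn main() {
    let mut list = vec![(5, 10), (6, 11)];
    let mut reclaims = Vec::new();
    let len = update_slot_list(&mut list, (6, 99), Some(5), &mut reclaims);
    assert_eq!(len, 1);
    assert_eq!(list, vec![(6, 99)]);
    assert_eq!(reclaims, vec![(5, 10), (6, 11)]);
}
```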
fn update_slot_list( slot_list: &mut SlotList<T>, slot: Slot, account_info: T, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim ) -> bool
Modifies ‘slot_list’: any entry at ‘slot’ or at ‘other_slot’ is replaced with ‘account_info’, or ‘account_info’ is appended to the slot list if the slot did not exist previously. Returns true if the caller should addref. Conditions when the caller should addref: ‘account_info’ does NOT represent a cached storage (the slot is being flushed from the cache), AND the previous slot_list entry AT ‘slot’ either did not exist (this is the first time this account was modified in this slot) or was previously cached (the storage is now being flushed from the cache). Note that even if an entry DID exist at ‘other_slot’, the above conditions apply.
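The addref conditions above can be captured as a small boolean helper; the flags below are stand-ins for the real account-info API, so this is a sketch of the rule, not the actual code:

```rust
// Sketch of the addref decision described above. The bools are stand-ins for
// the account-info's "cached storage" flag; not the real IndexValue API.
fn should_addref(new_is_cached: bool, previous_at_slot: Option<bool /* was cached */>) -> bool {
    // Addref only when the new value is being stored (not cached) AND either
    // this slot had no prior entry or the prior entry was only cached.
    !new_is_cached
        && match previous_at_slot {
            None => true,                     // first modification of this account in this slot
            Some(prev_cached) => prev_cached, // was cached; now being flushed to storage
        }
}

fn main() {
    assert!(should_addref(false, None));         // new store, no prior entry
    assert!(should_addref(false, Some(true)));   // flushing a previously cached entry
    assert!(!should_addref(false, Some(false))); // already stored at this slot
    assert!(!should_addref(true, None));         // cached writes never addref
}
```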
fn disk_to_cache_entry( &self, slot_list: SlotList<U>, ref_count: RefCount ) -> Arc<AccountMapEntryInner<T>>
pub fn len_for_stats(&self) -> usize
pub fn startup_insert_only( &self, slot: Slot, items: impl Iterator<Item = (Pubkey, T)> )
Queue up these insertions for when the flush thread is dealing with this bin. This is very fast and requires no lookups or disk access.
pub fn insert_new_entry_if_missing_with_lock( &self, pubkey: Pubkey, new_entry: PreAllocatedAccountMapEntry<T> ) -> InsertNewEntryResults
fn add_hold_range_in_memory_if_already_held<R>( &self, range: &R, evictions_guard: &EvictionsGuard<'_> ) -> bool where R: RangeBounds<Pubkey>,
Look at the currently held ranges. If ‘range’ is already included in what is being held, then add ‘range’ to the currently held list AND return true. If ‘range’ is NOT already included in what is being held, then return false withOUT adding ‘range’ to the list of what is currently held.
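A sketch of this check with u64 keys in place of Pubkeys; "already held" is assumed here to mean that a single held range fully covers the requested range:

```rust
// Sketch of the "already held?" check described above.
use std::ops::RangeInclusive;

fn add_range_if_already_held(
    held: &mut Vec<RangeInclusive<u64>>,
    range: RangeInclusive<u64>,
) -> bool {
    // Assumption: "already held" means some single held range covers the new range entirely.
    let already_held = held
        .iter()
        .any(|r| r.contains(range.start()) && r.contains(range.end()));
    if already_held {
        held.push(range);
    }
    already_held
}

fn main() {
    let mut held = vec![10..=100u64];
    assert!(add_range_if_already_held(&mut held, 20..=30)); // covered: added
    assert_eq!(held.len(), 2);
    assert!(!add_range_if_already_held(&mut held, 5..=30)); // not covered: not added
    assert_eq!(held.len(), 2);
}
```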
fn just_set_hold_range_in_memory<R>( &self, range: &R, start_holding: bool, evictions_guard: &EvictionsGuard<'_> ) where R: RangeBounds<Pubkey>,
fn just_set_hold_range_in_memory_internal<R>( &self, range: &R, start_holding: bool, only_add_if_already_held: bool, _evictions_guard: &EvictionsGuard<'_> ) -> bool where R: RangeBounds<Pubkey>,
If ‘start_holding’, the caller wants to add ‘range’ to the list of ranges being held. If !‘start_holding’, the caller wants to remove ‘range’ from the list. If ‘only_add_if_already_held’, the caller intends to add ‘range’ to the list only if the range is already held. Returns true iff start_holding=true and the range we’re asked to hold was already being held.
pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool) where R: RangeBounds<Pubkey> + Debug,
If ‘start_holding’=true, then at the end of this function cache_ranges_held will be updated to contain ‘range’, and all pubkeys in that range will be in the in-mem cache. If ‘start_holding’=false, then ‘range’ will be removed from cache_ranges_held, and all pubkeys will be eligible for removal from the in-mem cache in the background if no other range is holding them. Any in-process flush will be aborted when it gets to evicting items from in-mem.
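A minimal sketch of the cache_ranges_held bookkeeping this implies, with u64 keys and without the cache population or eviction coordination the real function performs:

```rust
// Sketch of the hold/unhold list described above (no cache population or eviction).
use std::ops::RangeInclusive;

struct RangeHolder {
    cache_ranges_held: Vec<RangeInclusive<u64>>,
}

impl RangeHolder {
    fn hold_range_in_memory(&mut self, range: &RangeInclusive<u64>, start_holding: bool) {
        if start_holding {
            self.cache_ranges_held.push(range.clone());
        } else if let Some(i) = self.cache_ranges_held.iter().position(|r| r == range) {
            // Remove one matching hold; other holds may still pin these keys in memory.
            self.cache_ranges_held.remove(i);
        }
    }

    /// A key is pinned in memory while any held range contains it.
    fn is_held(&self, key: &u64) -> bool {
        self.cache_ranges_held.iter().any(|r| r.contains(key))
    }
}

fn main() {
    let mut holder = RangeHolder { cache_ranges_held: Vec::new() };
    let range = 10..=20u64;
    holder.hold_range_in_memory(&range, true);
    assert!(holder.is_held(&15));
    holder.hold_range_in_memory(&range, false);
    assert!(!holder.is_held(&15));
}
```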
fn put_range_in_cache<R>( &self, range: &Option<&R>, _evictions_guard: &EvictionsGuard<'_> ) where R: RangeBounds<Pubkey>,
fn get_stop_evictions(&self) -> bool
Returns true if there are active requests to stop evictions.
fn get_stop_evictions_changes(&self) -> u64
Return the count of calls to ‘start_stop_evictions’, indicating that changes could have been made to the eviction strategy.
pub(crate) fn flush(&self, can_advance_age: bool)
fn random_chance_of_eviction() -> bool
Returns true if a dice roll indicates this call should result in a random eviction. This causes non-determinism in cache contents per validator.
fn approx_size_of_one_entry() -> usize
Assumes 1 entry in the slot list. Ignores the overhead of the HashMap and such.
fn should_evict_based_on_age( current_age: Age, entry: &Arc<AccountMapEntryInner<T>>, startup: bool, ages_flushing_now: Age ) -> bool
fn should_evict_from_mem<'a>( &self, current_age: Age, entry: &'a Arc<AccountMapEntryInner<T>>, startup: bool, update_stats: bool, exceeds_budget: bool, ages_flushing_now: Age ) -> (bool, Option<RwLockReadGuard<'a, SlotList<T>>>)
Return true if ‘entry’ should be evicted from the in-mem index.
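A hedged sketch of an age-window eviction check in the spirit of should_evict_based_on_age; the exact condition (wrapping age difference within ages_flushing_now, with startup always eligible) is an assumption for illustration:

```rust
// Hedged sketch of an age-window eviction check (assumption: an entry becomes an
// eviction candidate once its age falls within the window of ages being flushed).
type Age = u8;

fn should_evict_based_on_age(
    current_age: Age,
    entry_age: Age,
    startup: bool,
    ages_flushing_now: Age,
) -> bool {
    // During startup everything is flushed aggressively.
    startup || current_age.wrapping_sub(entry_age) <= ages_flushing_now
}

fn main() {
    // Entry aged to 250, current age 2 (Age wraps at 256): difference is 8.
    assert!(should_evict_based_on_age(2, 250, false, 10));
    assert!(!should_evict_based_on_age(2, 250, false, 4));
    assert!(should_evict_based_on_age(2, 250, true, 0)); // startup always evicts
}
```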
fn gather_possible_evictions<'a>( iter: impl Iterator<Item = (&'a Pubkey, &'a Arc<AccountMapEntryInner<T>>)>, possible_evictions: &mut PossibleEvictions<T>, startup: bool, current_age: Age, ages_flushing_now: Age, can_randomly_flush: bool )
Fill in ‘possible_evictions’ from ‘iter’ by checking age.
fn flush_scan( &self, current_age: Age, startup: bool, _flush_guard: &FlushGuard<'_>, ages_flushing_now: Age ) -> FlushScanResult<T>
Scan loop: holds a read lock and identifies items which are potential candidates to evict.
fn write_startup_info_to_disk(&self)
pub(crate) fn populate_and_retrieve_duplicate_keys_from_startup( &self ) -> Vec<(Slot, Pubkey)>
Pull out all duplicate pubkeys from ‘startup_info’. Duplicate pubkeys have a slot list with len > 1. These were collected for this bin when we did batch inserts in the bg flush threads. Insert these into the in-mem index, then return the duplicate (Slot, Pubkey) pairs.
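A sketch of just the duplicate-extraction step (u64 keys in place of Pubkeys); the real function also inserts the collected entries into the in-mem index:

```rust
// Sketch of pulling out duplicates collected during startup.
use std::collections::HashMap;

type Slot = u64;

/// Any key that was inserted for more than one slot is a duplicate; return every
/// (Slot, key) pair belonging to a duplicate key.
fn retrieve_duplicate_keys(collected: &HashMap<u64, Vec<Slot>>) -> Vec<(Slot, u64)> {
    collected
        .iter()
        .filter(|(_, slots)| slots.len() > 1)
        .flat_map(|(key, slots)| slots.iter().map(move |slot| (*slot, *key)))
        .collect()
}

fn main() {
    let mut collected = HashMap::new();
    collected.insert(1u64, vec![10u64]);        // unique
    collected.insert(2u64, vec![10u64, 11u64]); // duplicate
    let mut dups = retrieve_duplicate_keys(&collected);
    dups.sort();
    assert_eq!(dups, vec![(10, 2), (11, 2)]);
}
```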
fn flush_internal(&self, flush_guard: &FlushGuard<'_>, can_advance_age: bool)
Synchronize the in-mem index with the disk index.
fn get_exceeds_budget(&self) -> bool
Calculate the estimated size of the in-mem index and return whether the size exceeds the specified budget.
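A rough sketch combining a per-entry size estimate (in the spirit of approx_size_of_one_entry above) with the budget check; the constants and composition are assumptions for illustration only:

```rust
// Sketch of the budget check described above (assumption: estimated size is
// entry count times an approximate per-entry size; the real estimate also
// accounts for bins and disk-backed configuration).
use std::mem::size_of;
use std::sync::Arc;

struct Entry {
    slot_list: Vec<(u64, u64)>,
}

/// Rough per-entry cost assuming one slot-list element, ignoring HashMap overhead.
fn approx_size_of_one_entry() -> usize {
    size_of::<[u8; 32]>()         // key (Pubkey is 32 bytes)
        + size_of::<Arc<Entry>>() // pointer held in the map
        + size_of::<Entry>()      // the entry itself
        + size_of::<(u64, u64)>() // one slot-list element
}

fn exceeds_budget(num_entries: usize, budget_bytes: usize) -> bool {
    num_entries.saturating_mul(approx_size_of_one_entry()) > budget_bytes
}

fn main() {
    assert!(!exceeds_budget(10, 1 << 20));
    assert!(exceeds_budget(1_000_000_000, 1 << 20));
}
```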
fn move_ages_to_future(&self, next_age: Age, current_age: Age, keys: &[Pubkey])
For each key in ‘keys’, look it up in the map and set its age to the future.