pub struct InMemAccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> {
    last_age_flushed: AtomicU8,
    map_internal: RwLock<HashMap<Pubkey, Arc<AccountMapEntryInner<T>>>>,
    storage: Arc<BucketMapHolder<T, U>>,
    bin: usize,
    bucket: Option<Arc<BucketApi<(Slot, U)>>>,
    pub(crate) cache_ranges_held: RwLock<Vec<RangeInclusive<Pubkey>>>,
    stop_evictions_changes: AtomicU64,
    stop_evictions: AtomicU64,
    flushing_active: AtomicBool,
    startup_info: StartupInfo<T>,
    possible_evictions: RwLock<PossibleEvictions<T>>,
    remaining_ages_to_skip_flushing: AtomicU8,
    num_ages_to_distribute_flushes: Age,
}

Fields

last_age_flushed: AtomicU8

map_internal: RwLock<HashMap<Pubkey, Arc<AccountMapEntryInner<T>>>>

storage: Arc<BucketMapHolder<T, U>>

bin: usize

bucket: Option<Arc<BucketApi<(Slot, U)>>>

cache_ranges_held: RwLock<Vec<RangeInclusive<Pubkey>>>

stop_evictions_changes: AtomicU64

stop_evictions: AtomicU64

flushing_active: AtomicBool

startup_info: StartupInfo<T>

info to streamline initial index generation

possible_evictions: RwLock<PossibleEvictions<T>>

possible evictions for next few slots coming up

remaining_ages_to_skip_flushing: AtomicU8

how many more ages to skip before this bucket is flushed (rather than skipped again). When this reaches 0, this bucket is flushed.

num_ages_to_distribute_flushes: Age

an individual bucket will evict its entries and write to disk every 1/NUM_AGES_TO_DISTRIBUTE_FLUSHES ages. Higher numbers mean we flush fewer buckets/s; lower numbers mean we flush more buckets/s.
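
To illustrate how these two fields can spread flush work across ages, here is a minimal, self-contained sketch of the countdown idea. The BucketAging type, the method name should_flush_this_age, and the reset policy are illustrative assumptions, not the crate's actual logic.

    use std::sync::atomic::{AtomicU8, Ordering};

    type Age = u8;

    // toy stand-in for a bucket that spreads its flushes across ages (hypothetical)
    struct BucketAging {
        remaining_ages_to_skip_flushing: AtomicU8,
        num_ages_to_distribute_flushes: Age,
    }

    impl BucketAging {
        // returns true if this bucket should flush at this age; otherwise counts down
        // so the bucket flushes roughly once every `num_ages_to_distribute_flushes` ages
        fn should_flush_this_age(&self) -> bool {
            let remaining = self.remaining_ages_to_skip_flushing.load(Ordering::Relaxed);
            if remaining == 0 {
                // flush now, then restart the countdown
                self.remaining_ages_to_skip_flushing.store(
                    self.num_ages_to_distribute_flushes.saturating_sub(1),
                    Ordering::Relaxed,
                );
                true
            } else {
                self.remaining_ages_to_skip_flushing
                    .store(remaining - 1, Ordering::Relaxed);
                false
            }
        }
    }

    fn main() {
        let bucket = BucketAging {
            remaining_ages_to_skip_flushing: AtomicU8::new(2),
            num_ages_to_distribute_flushes: 3,
        };
        // the first two ages are skipped, the third flushes, then the cycle repeats
        for age in 0..6u8 {
            println!("age {age}: flush = {}", bucket.should_flush_this_age());
        }
    }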

Implementations

impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T, U>

pub fn new(storage: &Arc<BucketMapHolder<T, U>>, bin: usize) -> Self

fn get_should_age(&self, age: Age) -> bool

true if this bucket needs to call flush for the current age. We need to scan each bucket once per value of age.
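
As a rough illustration of the last_age_flushed bookkeeping that get_should_age and set_has_aged describe, here is a hedged, standalone sketch; the simplified Bucket type and the atomic orderings are assumptions, and the real set_has_aged also takes a can_advance_age flag that is omitted here.

    use std::sync::atomic::{AtomicU8, Ordering};

    type Age = u8;

    // simplified bucket: only the piece of state these two methods touch
    struct Bucket {
        last_age_flushed: AtomicU8,
    }

    impl Bucket {
        // true if this bucket has not yet been flushed for `age`
        fn get_should_age(&self, age: Age) -> bool {
            self.last_age_flushed.load(Ordering::Acquire) != age
        }

        // record that the flush scan has handled this bucket for `age`
        fn set_has_aged(&self, age: Age) {
            self.last_age_flushed.store(age, Ordering::Release);
        }
    }

    fn main() {
        let bucket = Bucket { last_age_flushed: AtomicU8::new(0) };
        assert!(bucket.get_should_age(1)); // not yet flushed for age 1
        bucket.set_has_aged(1);
        assert!(!bucket.get_should_age(1)); // already flushed for age 1
    }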

fn set_has_aged(&self, age: Age, can_advance_age: bool)

called after flush scans this bucket at the current age

fn last_age_flushed(&self) -> Age

pub fn shrink_to_fit(&self)

Release the entire in-mem hashmap to free all memory associated with it. The idea is that during startup we needed a larger map than we need during runtime. When using disk buckets, the in-mem index grows over time with dynamic use and then shrinks, in theory back to 0.
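
A minimal sketch of the idea, assuming only that the in-mem map lives behind an RwLock<HashMap<...>> (as map_internal above does); the helper name shrink_index_map is hypothetical.

    use std::collections::HashMap;
    use std::sync::RwLock;

    // shrink a map that was sized for startup back down to its runtime footprint
    fn shrink_index_map<K: std::hash::Hash + Eq, V>(map: &RwLock<HashMap<K, V>>) {
        let mut map = map.write().unwrap();
        // drops the extra capacity that was useful while bulk-loading at startup
        map.shrink_to_fit();
    }

    fn main() {
        let map: RwLock<HashMap<u64, String>> = RwLock::new(HashMap::with_capacity(1_000_000));
        map.write().unwrap().insert(42, "only entry".to_string());
        shrink_index_map(&map);
        assert!(map.read().unwrap().capacity() < 1_000_000);
    }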

pub fn items<R>(&self, range: &R) -> Vec<(Pubkey, Arc<AccountMapEntryInner<T>>)> where R: RangeBounds<Pubkey> + Debug

pub fn keys(&self) -> Vec<Pubkey>

fn load_from_disk(&self, pubkey: &Pubkey) -> Option<(SlotList<U>, RefCount)>

fn load_account_entry_from_disk( &self, pubkey: &Pubkey ) -> Option<Arc<AccountMapEntryInner<T>>>

fn get_only_in_mem<RT>( &self, pubkey: &Pubkey, update_age: bool, callback: impl for<'a> FnOnce(Option<&'a Arc<AccountMapEntryInner<T>>>) -> RT ) -> RT

lookup ‘pubkey’ by only looking in memory. Does not look on disk. callback is called whether pubkey is found or not
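
The shape of this memory-only lookup can be sketched with simplified types; the toy Entry struct and the lock handling here are assumptions, but the key point from the doc above holds: the callback runs whether or not the pubkey is present.

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};

    type Pubkey = [u8; 32];

    // toy stand-in for AccountMapEntryInner
    struct Entry {
        ref_count: u64,
    }

    struct InMemIndex {
        map_internal: RwLock<HashMap<Pubkey, Arc<Entry>>>,
    }

    impl InMemIndex {
        // look up `pubkey` in memory only; never touch disk;
        // `callback` is called whether or not the key is found
        fn get_only_in_mem<RT>(
            &self,
            pubkey: &Pubkey,
            callback: impl FnOnce(Option<&Arc<Entry>>) -> RT,
        ) -> RT {
            let map = self.map_internal.read().unwrap();
            callback(map.get(pubkey))
        }
    }

    fn main() {
        let index = InMemIndex { map_internal: RwLock::new(HashMap::new()) };
        let key = [7u8; 32];
        index
            .map_internal
            .write()
            .unwrap()
            .insert(key, Arc::new(Entry { ref_count: 1 }));

        let found = index.get_only_in_mem(&key, |entry| entry.map(|e| e.ref_count));
        assert_eq!(found, Some(1));

        let missing = index.get_only_in_mem(&[0u8; 32], |entry| entry.is_some());
        assert!(!missing);
    }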

pub fn get(&self, pubkey: &Pubkey) -> Option<Arc<AccountMapEntryInner<T>>>

lookup ‘pubkey’ in index (in mem or on disk)

fn set_age_to_future( &self, entry: &Arc<AccountMapEntryInner<T>>, is_cached: bool )

set age of ‘entry’ to the future. If ‘is_cached’, the age will be set farther out.

pub(crate) fn get_internal<RT>( &self, pubkey: &Pubkey, callback: impl for<'a> FnOnce(Option<&Arc<AccountMapEntryInner<T>>>) -> (bool, RT) ) -> RT

lookup ‘pubkey’ in index (in_mem or disk). call ‘callback’ whether found or not

fn remove_if_slot_list_empty_value(&self, is_empty: bool) -> bool

fn delete_disk_key(&self, pubkey: &Pubkey)

fn remove_if_slot_list_empty_entry( &self, entry: Entry<'_, Pubkey, Arc<AccountMapEntryInner<T>>> ) -> bool

return false if the entry is in the index (disk or memory) and has a slot list len > 0. Return true in all other cases, including if the entry is NOT in the index at all.
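
A small sketch of that decision table, using a plain Option for "is the entry in the index at all" and a Vec-backed slot list; the free function is hypothetical and ignores the disk half of the lookup.

    type Slot = u64;
    type SlotList<T> = Vec<(Slot, T)>;

    // returns false only if the entry exists and its slot list is non-empty;
    // true in every other case, including "not present"
    fn remove_if_slot_list_empty<T>(entry: Option<&SlotList<T>>) -> bool {
        match entry {
            Some(slot_list) => slot_list.is_empty(),
            None => true,
        }
    }

    fn main() {
        let populated: SlotList<u64> = vec![(1, 42)];
        let empty: SlotList<u64> = vec![];
        assert!(!remove_if_slot_list_empty(Some(&populated))); // keep the entry
        assert!(remove_if_slot_list_empty(Some(&empty)));      // ok to remove
        assert!(remove_if_slot_list_empty::<u64>(None));        // not in the index at all
    }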

pub fn remove_if_slot_list_empty(&self, pubkey: Pubkey) -> bool

pub fn slot_list_mut<RT>( &self, pubkey: &Pubkey, user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT ) -> Option<RT>

fn update_slot_list_entry( &self, entry: &Arc<AccountMapEntryInner<T>>, new_value: PreAllocatedAccountMapEntry<T>, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim )

update ‘entry’ with ‘new_value’

pub fn upsert( &self, pubkey: &Pubkey, new_value: PreAllocatedAccountMapEntry<T>, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim )

fn update_entry_stats(&self, stopped_measure: Measure, found: bool)

pub(crate) fn lock_and_update_slot_list( current: &AccountMapEntryInner<T>, new_value: (Slot, T), other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim ) -> usize

Try to update an item in the slot list at the given slot. If an item for the slot already exists in the list, remove the older item, add it to reclaims, and insert the new item. If ‘other_slot’ is some, then also remove any entries in the slot list that are at ‘other_slot’. Return the resulting len of the slot list.

fn update_slot_list( slot_list: &mut SlotList<T>, slot: Slot, account_info: T, other_slot: Option<Slot>, reclaims: &mut SlotList<T>, reclaim: UpsertReclaim ) -> bool

modifies slot_list. Any entry at ‘slot’ or at ‘other_slot’ is replaced with ‘account_info’; otherwise, ‘account_info’ is appended to the slot list if the slot did not exist previously. Returns true if the caller should addref. Conditions when the caller should addref: ‘account_info’ does NOT represent a cached storage (the slot is being flushed from the cache) AND the previous slot_list entry AT ‘slot’ did not exist (this is the first time this account was modified in this “slot”), or was previously cached (the storage is now being flushed from the cache). Note that even if an entry DID exist at ‘other_slot’, the above conditions apply.
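
A standalone, hedged sketch of the replacement and addref rules described above. The toy Info type with an is_cached flag stands in for T: IndexValue, the UpsertReclaim parameter is omitted (replaced entries are always pushed to reclaims here), and the code illustrates the documented conditions rather than mirroring the crate's implementation.

    type Slot = u64;
    type SlotList<T> = Vec<(Slot, T)>;

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Info {
        is_cached: bool,
    }

    // replace any entry at `slot` or `other_slot` with `(slot, account_info)`,
    // appending instead if neither existed; replaced entries go into `reclaims`.
    // Returns true if the caller should addref: the new info is not cached AND
    // the previous entry at `slot` either did not exist or was cached.
    fn update_slot_list(
        slot_list: &mut SlotList<Info>,
        slot: Slot,
        account_info: Info,
        other_slot: Option<Slot>,
        reclaims: &mut SlotList<Info>,
    ) -> bool {
        let mut previous_at_slot: Option<Info> = None;
        // drain out anything at `slot` or `other_slot`, remembering what was at `slot`
        slot_list.retain(|&(existing_slot, existing_info)| {
            let matches = existing_slot == slot || Some(existing_slot) == other_slot;
            if matches {
                if existing_slot == slot {
                    previous_at_slot = Some(existing_info);
                }
                reclaims.push((existing_slot, existing_info));
            }
            !matches
        });
        slot_list.push((slot, account_info));

        // addref only when the new entry is a real (non-cached) store and the
        // previous entry at `slot` was absent or cached
        !account_info.is_cached
            && previous_at_slot.map_or(true, |previous| previous.is_cached)
    }

    fn main() {
        let cached = Info { is_cached: true };
        let stored = Info { is_cached: false };

        // first time this account is stored at slot 5: addref
        let mut slot_list = vec![(3, stored)];
        let mut reclaims = Vec::new();
        assert!(update_slot_list(&mut slot_list, 5, stored, None, &mut reclaims));

        // flushing the cached entry at slot 5 to a real store: addref again
        let mut slot_list = vec![(5, cached)];
        let mut reclaims = Vec::new();
        assert!(update_slot_list(&mut slot_list, 5, stored, None, &mut reclaims));
        assert_eq!(reclaims, vec![(5, cached)]);

        // overwriting an existing non-cached entry at slot 5: no addref
        let mut slot_list = vec![(5, stored)];
        let mut reclaims = Vec::new();
        assert!(!update_slot_list(&mut slot_list, 5, stored, None, &mut reclaims));
    }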

fn disk_to_cache_entry( &self, slot_list: SlotList<U>, ref_count: RefCount ) -> Arc<AccountMapEntryInner<T>>

pub fn len_for_stats(&self) -> usize

pub fn startup_insert_only( &self, slot: Slot, items: impl Iterator<Item = (Pubkey, T)> )

Queue up these insertions for when the flush thread is dealing with this bin. This is very fast and requires no lookups or disk access.

pub fn insert_new_entry_if_missing_with_lock( &self, pubkey: Pubkey, new_entry: PreAllocatedAccountMapEntry<T> ) -> InsertNewEntryResults

fn add_hold_range_in_memory_if_already_held<R>(&self, range: &R, evictions_guard: &EvictionsGuard<'_>) -> bool where R: RangeBounds<Pubkey>

Look at the currently held ranges. If ‘range’ is already included in what is being held, then add ‘range’ to the currently held list AND return true. If ‘range’ is NOT already included in what is being held, then return false withOUT adding ‘range’ to the list of what is currently held.

fn just_set_hold_range_in_memory<R>(&self, range: &R, start_holding: bool, evictions_guard: &EvictionsGuard<'_>) where R: RangeBounds<Pubkey>

fn just_set_hold_range_in_memory_internal<R>(&self, range: &R, start_holding: bool, only_add_if_already_held: bool, _evictions_guard: &EvictionsGuard<'_>) -> bool where R: RangeBounds<Pubkey>

if ‘start_holding’, then the caller wants to add ‘range’ to the list of ranges being held. If !‘start_holding’, then the caller wants to remove ‘range’ from the list. If ‘only_add_if_already_held’, the caller intends to only add ‘range’ to the list if the range is already held. Returns true iff start_holding=true and the range we’re asked to hold was already being held.
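
A hedged sketch of the bookkeeping those flags describe, over a Vec<RangeInclusive<Pubkey>> like cache_ranges_held above. The "already held" test here is a simple containment check against each held range, which is an assumption; the real code also coordinates with the evictions guard, omitted here.

    use std::ops::RangeInclusive;
    use std::sync::RwLock;

    type Pubkey = [u8; 32];

    struct HeldRanges {
        cache_ranges_held: RwLock<Vec<RangeInclusive<Pubkey>>>,
    }

    impl HeldRanges {
        // if `start_holding`, add `range` to the held list (unless
        // `only_add_if_already_held` and nothing already covers it);
        // if !`start_holding`, remove one matching `range` from the list.
        // Returns true iff `start_holding` and the range was already covered.
        fn just_set_hold_range_in_memory(
            &self,
            range: &RangeInclusive<Pubkey>,
            start_holding: bool,
            only_add_if_already_held: bool,
        ) -> bool {
            let mut held = self.cache_ranges_held.write().unwrap();
            if start_holding {
                let already_held = held
                    .iter()
                    .any(|h| h.start() <= range.start() && range.end() <= h.end());
                if already_held || !only_add_if_already_held {
                    held.push(range.clone());
                }
                already_held
            } else {
                if let Some(pos) = held.iter().position(|h| h == range) {
                    held.remove(pos);
                }
                false
            }
        }
    }

    fn main() {
        let holder = HeldRanges { cache_ranges_held: RwLock::new(Vec::new()) };
        let wide = [0u8; 32]..=[0xffu8; 32];
        let narrow = [1u8; 32]..=[2u8; 32];

        assert!(!holder.just_set_hold_range_in_memory(&wide, true, false)); // newly held
        assert!(holder.just_set_hold_range_in_memory(&narrow, true, true)); // covered by `wide`
        holder.just_set_hold_range_in_memory(&narrow, false, false); // release it again
    }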

pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool) where R: RangeBounds<Pubkey> + Debug

if ‘start_holding’=true, then at the end of this function cache_ranges_held will be updated to contain ‘range’, and all pubkeys in that range will be in the in-mem cache. If ‘start_holding’=false, then ‘range’ will be removed from cache_ranges_held, and all pubkeys will be eligible for being removed from the in-mem cache in the bg if no other range is holding them. Any in-process flush will be aborted when it gets to evicting items from in-mem.

fn put_range_in_cache<R>(&self, range: &Option<&R>, _evictions_guard: &EvictionsGuard<'_>) where R: RangeBounds<Pubkey>

fn get_stop_evictions(&self) -> bool

returns true if there are active requests to stop evictions

fn get_stop_evictions_changes(&self) -> u64

return count of calls to ‘start_stop_evictions’, indicating changes could have been made to eviction strategy

pub(crate) fn flush(&self, can_advance_age: bool)

fn random_chance_of_eviction() -> bool

returns true if a dice roll indicates this call should result in a random eviction. This causes non-determinism in cache contents per validator.

fn approx_size_of_one_entry() -> usize

assumes 1 entry in the slot list. Ignores overhead of the HashMap and such

fn should_evict_based_on_age( current_age: Age, entry: &Arc<AccountMapEntryInner<T>>, startup: bool, ages_flushing_now: Age ) -> bool

fn should_evict_from_mem<'a>( &self, current_age: Age, entry: &'a Arc<AccountMapEntryInner<T>>, startup: bool, update_stats: bool, exceeds_budget: bool, ages_flushing_now: Age ) -> (bool, Option<RwLockReadGuard<'a, SlotList<T>>>)

return true if ‘entry’ should be evicted from the in-mem index

fn gather_possible_evictions<'a>( iter: impl Iterator<Item = (&'a Pubkey, &'a Arc<AccountMapEntryInner<T>>)>, possible_evictions: &mut PossibleEvictions<T>, startup: bool, current_age: Age, ages_flushing_now: Age, can_randomly_flush: bool )

fill in possible_evictions from iter by checking age
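
A simplified sketch of an age-based gathering pass over the in-mem entries; the toy Entry with a single age field and the exact eligibility test (entry age reached within the window of ages flushing now, using wrapping u8 arithmetic) are assumptions standing in for should_evict_based_on_age.

    use std::collections::HashMap;

    type Age = u8;
    type Pubkey = [u8; 32];

    // toy stand-in for AccountMapEntryInner: just the age at which the entry
    // becomes eligible for eviction
    struct Entry {
        age: Age,
    }

    // collect keys whose eligibility age has been reached within the window of
    // ages being flushed now (wrapping arithmetic, since Age is a u8 counter)
    fn gather_possible_evictions<'a>(
        iter: impl Iterator<Item = (&'a Pubkey, &'a Entry)>,
        possible_evictions: &mut Vec<Pubkey>,
        current_age: Age,
        ages_flushing_now: Age,
    ) {
        for (pubkey, entry) in iter {
            if current_age.wrapping_sub(entry.age) <= ages_flushing_now {
                possible_evictions.push(*pubkey);
            }
        }
    }

    fn main() {
        let mut map: HashMap<Pubkey, Entry> = HashMap::new();
        map.insert([1u8; 32], Entry { age: 10 }); // eligible once current_age reaches 10
        map.insert([2u8; 32], Entry { age: 20 }); // still far in the future

        let mut possible = Vec::new();
        gather_possible_evictions(map.iter(), &mut possible, 10, 2);
        assert_eq!(possible, vec![[1u8; 32]]);
    }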

fn flush_scan( &self, current_age: Age, startup: bool, _flush_guard: &FlushGuard<'_>, ages_flushing_now: Age ) -> FlushScanResult<T>

scan loop: holds the read lock and identifies items which are potential candidates to evict

fn write_startup_info_to_disk(&self)

pub(crate) fn populate_and_retrieve_duplicate_keys_from_startup( &self ) -> Vec<(Slot, Pubkey)>

pull out all duplicate pubkeys from ‘startup_info’. Duplicate pubkeys have a slot list with len > 1. These were collected for this bin when we did batch inserts in the bg flush threads. Insert these into the in-mem index, then return the duplicate (Slot, Pubkey) pairs.
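
A hedged sketch of pulling duplicates out of a flat batch of startup inserts; the (Slot, Pubkey) batch layout and the helper name are assumptions in place of StartupInfo, but the rule is the one stated above: a pubkey whose slot list would end up with len > 1 is a duplicate.

    use std::collections::HashMap;

    type Slot = u64;
    type Pubkey = [u8; 32];

    // from a flat batch of startup inserts, return the (Slot, Pubkey) pairs whose
    // pubkey appears more than once (i.e. whose slot list would have len > 1)
    fn duplicate_keys_from_startup(inserts: &[(Slot, Pubkey)]) -> Vec<(Slot, Pubkey)> {
        let mut counts: HashMap<Pubkey, usize> = HashMap::new();
        for (_slot, pubkey) in inserts {
            *counts.entry(*pubkey).or_default() += 1;
        }
        inserts
            .iter()
            .filter(|(_slot, pubkey)| counts[pubkey] > 1)
            .copied()
            .collect()
    }

    fn main() {
        let a = [1u8; 32];
        let b = [2u8; 32];
        let inserts = vec![(10, a), (11, a), (12, b)];
        // `a` was written in two slots, so both of its entries are duplicates
        assert_eq!(duplicate_keys_from_startup(&inserts), vec![(10, a), (11, a)]);
    }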

fn flush_internal(&self, flush_guard: &FlushGuard<'_>, can_advance_age: bool)

synchronize the in-mem index with the disk index

fn get_exceeds_budget(&self) -> bool

calculate the estimated size of the in-mem index and return whether the size exceeds the specified budget
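
Putting approx_size_of_one_entry and this budget check together, here is a hedged sketch under simplified types; the exact accounting (which overheads are counted) is an assumption.

    use std::mem::size_of;
    use std::sync::Arc;

    type Slot = u64;
    type RefCount = u64;

    // simplified layout stand-in, only used here for size estimation
    #[allow(dead_code)]
    struct AccountMapEntryInner<T> {
        slot_list: Vec<(Slot, T)>,
        ref_count: RefCount,
    }

    // rough per-entry cost: the entry itself, one slot-list element, and an Arc
    // allocation; ignores HashMap overhead (as the doc above notes)
    fn approx_size_of_one_entry<T>() -> usize {
        size_of::<AccountMapEntryInner<T>>() + size_of::<(Slot, T)>() + size_of::<Arc<()>>()
    }

    // true if `count` in-mem entries are estimated to exceed `budget_bytes`
    fn exceeds_budget<T>(count: usize, budget_bytes: usize) -> bool {
        count.saturating_mul(approx_size_of_one_entry::<T>()) > budget_bytes
    }

    fn main() {
        type Info = u64;
        let per_entry = approx_size_of_one_entry::<Info>();
        println!("approx bytes per entry: {per_entry}");
        assert!(exceeds_budget::<Info>(1_000_000, per_entry)); // far over a tiny budget
        assert!(!exceeds_budget::<Info>(1, usize::MAX)); // fits a huge budget
    }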

fn move_ages_to_future(&self, next_age: Age, current_age: Age, keys: &[Pubkey])

for each key in ‘keys’, look up in map, set age to the future

fn evict_from_cache( &self, evictions: Vec<Pubkey>, current_age: Age, startup: bool, randomly_evicted: bool, ages_flushing_now: Age )

pub fn stats(&self) -> &BucketMapHolderStats

fn update_stat(stat: &AtomicU64, value: u64)

pub fn update_time_stat(stat: &AtomicU64, m: Measure)

Trait Implementations

impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> Debug for InMemAccountsIndex<T, U>

fn fmt(&self, _f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

Auto Trait Implementations

impl<T, U> RefUnwindSafe for InMemAccountsIndex<T, U> where T: RefUnwindSafe

impl<T, U> Send for InMemAccountsIndex<T, U>

impl<T, U> Sync for InMemAccountsIndex<T, U>

impl<T, U> Unpin for InMemAccountsIndex<T, U> where T: Unpin

impl<T, U> UnwindSafe for InMemAccountsIndex<T, U> where T: RefUnwindSafe

Blanket Implementations

impl<T> AbiExample for T
    default fn example() -> T

impl<T> Any for T where T: 'static + ?Sized
    fn type_id(&self) -> TypeId
        Gets the TypeId of self.

impl<T> Borrow<T> for T where T: ?Sized
    fn borrow(&self) -> &T
        Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T where T: ?Sized
    fn borrow_mut(&mut self) -> &mut T
        Mutably borrows from an owned value.

impl<T> From<T> for T
    fn from(t: T) -> T
        Returns the argument unchanged.

impl<T> Instrument for T
    fn instrument(self, span: Span) -> Instrumented<Self>
        Instruments this type with the provided Span, returning an Instrumented wrapper.
    fn in_current_span(self) -> Instrumented<Self>
        Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T where U: From<T>
    fn into(self) -> U
        Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> Pointable for T
    const ALIGN: usize = mem::align_of::<T>()
        The alignment of pointer.
    type Init = T
        The type for initializers.
    unsafe fn init(init: <T as Pointable>::Init) -> usize
        Initializes a with the given initializer.
    unsafe fn deref<'a>(ptr: usize) -> &'a T
        Dereferences the given pointer.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T
        Mutably dereferences the given pointer.
    unsafe fn drop(ptr: usize)
        Drops the object pointed to by the given pointer.

impl<T> Same<T> for T
    type Output = T
        Should always be Self.

impl<T, U> TryFrom<U> for T where U: Into<T>
    type Error = Infallible
        The type returned in the event of a conversion error.
    fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
        Performs the conversion.

impl<T, U> TryInto<U> for T where U: TryFrom<T>
    type Error = <U as TryFrom<T>>::Error
        The type returned in the event of a conversion error.
    fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
        Performs the conversion.

impl<V, T> VZip<V> for T where V: MultiLane<T>
    fn vzip(self) -> V

impl<T> WithSubscriber for T
    fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self> where S: Into<Dispatch>
        Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.
    fn with_current_subscriber(self) -> WithDispatch<Self>
        Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.