Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:12:52

0001 /*
0002     Copyright (c) 2005-2020 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_concurrent_lru_cache_H
0018 #define __TBB_concurrent_lru_cache_H
0019 
0020 #define __TBB_concurrent_lru_cache_H_include_area
0021 #include "internal/_warning_suppress_enable_notice.h"
0022 
0023 #if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
0024     #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
0025 #endif
0026 
0027 #include "tbb_stddef.h"
0028 
0029 #include <map>
0030 #include <list>
0031 #include <algorithm> // std::find
0032 #if __TBB_CPP11_RVALUE_REF_PRESENT
0033 #include <utility> // std::move
0034 #endif
0035 
0036 #include "atomic.h"
0037 #include "internal/_aggregator_impl.h"
0038 
0039 namespace tbb{
0040 namespace interface6 {
0041 
0042 
// concurrent_lru_cache<key, value, value_functor> -- a concurrent cache with
// LRU eviction of *unused* items (TBB preview feature, interface6).
//
// Values are produced on demand by a user-supplied functor. operator[] hands
// out RAII `handle` objects; while at least one handle to an item is alive the
// item is pinned (reference-counted) and is never evicted. Once the last
// handle is released the item moves into an LRU "history" list of at most
// `number_of_lru_history_items` entries; the least-recently-used unused items
// are evicted when that list overflows.
//
// Concurrency model: every structural mutation of the map and LRU list is
// funneled through a tbb::internal::aggregator, so at any moment a single
// thread executes all queued operations serially (see handle_operations /
// the *_serial methods). Only the value computation itself runs outside the
// aggregator, guarded by the `my_is_ready` release-store / spin-wait pairing.
0043 template <typename key_type, typename value_type, typename value_functor_type = value_type (*)(key_type) >
0044 class concurrent_lru_cache : internal::no_assign{
0045 private:
0046     typedef concurrent_lru_cache self_type;
0047     typedef value_functor_type value_function_type;
0048     typedef std::size_t ref_counter_type;
0049     struct map_value_type;
0050     typedef std::map<key_type, map_value_type> map_storage_type;
     // LRU history: iterators into the map, most recently released at front.
0051     typedef std::list<typename map_storage_type::iterator> lru_list_type;
     // Per-key record stored in the map.
     //  - my_ref_counter: number of live handles pinning this item
     //  - my_lru_list_iterator: position in my_lru_list when unused,
     //    my_lru_list.end() while the item is in use
     //  - my_is_ready: false until the computing thread publishes my_value
0052     struct map_value_type {
0053         value_type my_value;
0054         ref_counter_type my_ref_counter;
0055         typename lru_list_type::iterator my_lru_list_iterator;
0056         bool my_is_ready;
0057 
0058         map_value_type (value_type const& a_value,  ref_counter_type a_ref_counter,    typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready)
0059             : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready)
0060         {}
0061     };
0062 
0063     class handle_object;
0064 
     // Aggregator plumbing: operations are queued as aggregator_operation
     // nodes and executed serially by whichever thread currently owns the
     // aggregator (it calls back into handle_operations below).
0065     struct aggregator_operation;
0066     typedef aggregator_operation aggregated_operation_type;
0067     typedef tbb::internal::aggregating_functor<self_type,aggregated_operation_type> aggregator_function_type;
0068     friend class tbb::internal::aggregating_functor<self_type,aggregated_operation_type>;
0069     typedef tbb::internal::aggregator<aggregator_function_type, aggregated_operation_type> aggregator_type;
0070 
0071 private:
0072     value_function_type my_value_function;
0073     std::size_t const my_number_of_lru_history_items;
0074     map_storage_type my_map_storage;
0075     lru_list_type my_lru_list;
0076     aggregator_type my_aggregator;
0077 
0078 public:
0079     typedef handle_object handle;
0080 
0081 public:
     // f: functor invoked (outside the aggregator) to compute a missing value.
     // number_of_lru_history_items: max number of *unused* items retained.
0082     concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items)
0083         : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items)
0084     {
0085         my_aggregator.initialize_handler(aggregator_function_type(this));
0086     }
0087 
     // Look up (or create) the entry for k and return a pinning handle.
     // Exactly one caller observes is_new_value_needed()==true for a fresh
     // entry; that caller computes the value and publishes it with a release
     // store to my_is_ready. All other callers spin-wait until the value is
     // ready. Note the computation itself is NOT serialized by the
     // aggregator, so independent keys can be computed concurrently.
0088     handle_object operator[](key_type k){
0089         retrieve_aggregator_operation op(k);
0090         my_aggregator.execute(&op);
0091         if (op.is_new_value_needed()){
0092              op.result().second.my_value = my_value_function(k);
0093              __TBB_store_with_release(op.result().second.my_is_ready, true);
0094         }else{
0095             tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,false);
0096         }
0097         return handle_object(*this,op.result());
0098     }
0099 private:
     // Called by ~handle_object / handle move-assignment: queues a refcount
     // decrement (and possible transfer to the LRU history) via the aggregator.
0100     void signal_end_of_usage(typename map_storage_type::reference value_ref){
0101         signal_end_of_usage_aggregator_operation op(value_ref);
0102         my_aggregator.execute(&op);
0103     }
0104 
0105 private:
0106 #if !__TBB_CPP11_RVALUE_REF_PRESENT
     // C++03 move emulation: a transferable proxy produced by move(handle).
0107     struct handle_move_t:no_assign{
0108         concurrent_lru_cache & my_cache_ref;
0109         typename map_storage_type::reference my_map_record_ref;
0110         handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {};
0111     };
0112 #endif
     // Move-only RAII handle. Holds one reference to a map record; releases
     // it (via signal_end_of_usage) on destruction or move-assignment-over.
     // A moved-from / default-constructed handle has both pointers NULL and
     // converts to false.
0113     class handle_object {
0114         concurrent_lru_cache * my_cache_pointer;
0115         typename map_storage_type::pointer my_map_record_ptr;
0116     public:
0117         handle_object() : my_cache_pointer(), my_map_record_ptr() {}
0118         handle_object(concurrent_lru_cache& cache_ref, typename map_storage_type::reference value_ref) : my_cache_pointer(&cache_ref), my_map_record_ptr(&value_ref) {}
         // True iff the handle currently owns a reference.
0119         operator bool() const {
0120             return (my_cache_pointer && my_map_record_ptr);
0121         }
0122 #if __TBB_CPP11_RVALUE_REF_PRESENT
0123         // TODO: add check for double moved objects by special dedicated field
0124         handle_object(handle_object&& src) : my_cache_pointer(src.my_cache_pointer), my_map_record_ptr(src.my_map_record_ptr) {
0125             __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?");
0126             src.my_cache_pointer = NULL;
0127             src.my_map_record_ptr = NULL;
0128         }
         // Releases the currently-held reference (if any) before taking over
         // src's reference; src is left empty.
0129         handle_object& operator=(handle_object&& src) {
0130             __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?");
0131             if (my_cache_pointer) {
0132                 my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
0133             }
0134             my_cache_pointer = src.my_cache_pointer;
0135             my_map_record_ptr = src.my_map_record_ptr;
0136             src.my_cache_pointer = NULL;
0137             src.my_map_record_ptr = NULL;
0138             return *this;
0139         }
0140 #else
         // C++03 path: "move construction/assignment" from the proxy type.
0141         handle_object(handle_move_t m) : my_cache_pointer(&m.my_cache_ref), my_map_record_ptr(&m.my_map_record_ref) {}
0142         handle_object& operator=(handle_move_t m) {
0143             if (my_cache_pointer) {
0144                 my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
0145             }
0146             my_cache_pointer = &m.my_cache_ref;
0147             my_map_record_ptr = &m.my_map_record_ref;
0148             return *this;
0149         }
0150         operator handle_move_t(){
0151             return move(*this);
0152         }
0153 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
         // Access the cached value. Only valid on a non-empty handle.
0154         value_type& value(){
0155             __TBB_ASSERT(my_cache_pointer,"get value from already moved object?");
0156             __TBB_ASSERT(my_map_record_ptr,"get value from an invalid or already moved object?");
0157             return my_map_record_ptr->second.my_value;
0158         }
0159         ~handle_object(){
0160             if (my_cache_pointer){
0161                 my_cache_pointer->signal_end_of_usage(*my_map_record_ptr);
0162             }
0163         }
0164     private:
0165 #if __TBB_CPP11_RVALUE_REF_PRESENT
0166         // For source compatibility with C++03
0167         friend handle_object&& move(handle_object& h){
0168             return std::move(h);
0169         }
0170 #else
0171         friend handle_move_t move(handle_object& h){
0172             return handle_object::move(h);
0173         }
0174         // TODO: add check for double moved objects by special dedicated field
         // Steals h's reference into a handle_move_t proxy; h is left empty.
0175         static handle_move_t move(handle_object& h){
0176             __TBB_ASSERT((h.my_cache_pointer && h.my_map_record_ptr) || (!h.my_cache_pointer && !h.my_map_record_ptr), "invalid state of moving object?");
0177             concurrent_lru_cache * cache_pointer = h.my_cache_pointer;
0178             typename map_storage_type::pointer map_record_ptr = h.my_map_record_ptr;
0179             h.my_cache_pointer = NULL;
0180             h.my_map_record_ptr = NULL;
0181             return handle_move_t(*cache_pointer, *map_record_ptr);
0182         }
0183 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
0184     private:
         // Copying is disabled (declared, never defined).
0185         void operator=(handle_object&);
0186 #if __SUNPRO_CC
0187     // Presumably due to a compiler error, private copy constructor
0188     // breaks expressions like handle h = cache[key];
0189     public:
0190 #endif
0191         handle_object(handle_object &);
0192     };
0193 private:
0194     //TODO: looks like aggregator_operation is a perfect match for statically typed variant type
     // Base node for queued operations; my_operation_type tags the concrete
     // derived type so cast_and_handle can dispatch without virtuals.
     // NOTE(review): "op_retive" looks like a typo for "op_retrieve"; it is an
     // internal identifier used consistently, so behavior is unaffected.
0195     struct aggregator_operation : tbb::internal::aggregated_operation<aggregator_operation>{
0196         enum e_op_type {op_retive, op_signal_end_of_usage};
0197         //TODO: try to use pointer to function apply_visitor here
0198         //TODO: try virtual functions and measure the difference
0199         e_op_type my_operation_type;
0200         aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {}
0201         void cast_and_handle(self_type& container ){
0202             if (my_operation_type==op_retive){
0203                 static_cast<retrieve_aggregator_operation*>(this)->handle(container);
0204             }else{
0205                 static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(container);
0206             }
0207         }
0208     };
     // Operation payload for operator[]: carries the key in, and the map
     // record pointer + "I must compute the value" flag back out.
0209     struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign {
0210         key_type my_key;
0211         typename map_storage_type::pointer my_result_map_record_pointer;
0212         bool my_is_new_value_needed;
0213         retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(false){}
0214         void handle(self_type& container ){
0215             my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed);
0216         }
0217         typename map_storage_type::reference result(){ return * my_result_map_record_pointer; }
0218         bool is_new_value_needed(){return my_is_new_value_needed;}
0219     };
     // Operation payload for releasing a handle's reference.
0220     struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign {
0221         typename map_storage_type::reference my_map_record_ref;
0222         signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){}
0223         void handle(self_type& container ){
0224             container.signal_end_of_usage_serial(my_map_record_ref);
0225         }
0226     };
0227 
0228 private:
    // Aggregator callback: drains the queued operation list serially.
    // `next` is read BEFORE the release-store to `status` -- once status is
    // released the thread that submitted tmp may resume and tmp must no
    // longer be touched.
0229    void handle_operations(aggregator_operation* op_list){
0230        while(op_list){
0231            op_list->cast_and_handle(*this);
0232            aggregator_operation* tmp = op_list;
0233            op_list=op_list->next;
0234            tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1));
0235        }
0236    }
0237 
0238 private:
    // Runs under the aggregator (single-threaded). Finds or inserts the
    // record for k; a fresh record is a not-ready placeholder (refcount 0,
    // LRU iterator = end()) and the out-param tells the caller to compute
    // the value. A record found in the LRU history is removed from it,
    // because a referenced item must not be a candidate for eviction.
0239    typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){
0240         typename map_storage_type::iterator it = my_map_storage.find(k);
0241         if (it == my_map_storage.end()){
0242             it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false)));
0243             is_new_value_needed = true;
0244         }else {
0245             typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator;
0246             if (list_it!=my_lru_list.end()) {
0247                 __TBB_ASSERT(!it->second.my_ref_counter,"item to be evicted should not have a live references");
0248                 //item is going to be used. Therefore it is not a subject for eviction
0249                 //so - remove it from LRU history.
0250                 my_lru_list.erase(list_it);
0251                 it->second.my_lru_list_iterator= my_lru_list.end();
0252             }
0253         }
0254         ++(it->second.my_ref_counter);
0255         return *it;
0256     }
0257 
    // Runs under the aggregator (single-threaded). Drops one reference; when
    // the last reference goes away the record is pushed to the front of the
    // LRU history, first evicting enough items from the back (least recently
    // used) to keep the history within my_number_of_lru_history_items.
0258     void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){
0259         typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first);
0260         __TBB_ASSERT(it!=my_map_storage.end(),"cache should not return past-end iterators to outer world");
0261         __TBB_ASSERT(&(*it) == &map_record_ref,"dangling reference has been returned to outside world? data race ?");
0262         __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it),
0263                 "object in use should not be in list of unused objects ");
0264         if (! --(it->second.my_ref_counter)){
0265             //it was the last reference so put it to the LRU history
0266             if (my_lru_list.size()>=my_number_of_lru_history_items){
0267                 //evict items in order to get a space
0268                 size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items;
0269                 for (size_t i=0; i<number_of_elements_to_evict; ++i){
0270                     typename map_storage_type::iterator it_to_evict = my_lru_list.back();
0271                     __TBB_ASSERT(!it_to_evict->second.my_ref_counter,"item to be evicted should not have a live references");
0272                     my_lru_list.pop_back();
0273                     my_map_storage.erase(it_to_evict);
0274                 }
0275             }
0276             my_lru_list.push_front(it);
0277             it->second.my_lru_list_iterator = my_lru_list.begin();
0278         }
0279     }
0280 };
0281 } // namespace interface6
0282 
0283 using interface6::concurrent_lru_cache;
0284 
0285 } // namespace tbb
0286 
0287 #include "internal/_warning_suppress_disable_notice.h"
0288 #undef __TBB_concurrent_lru_cache_H_include_area
0289 
0290 #endif //__TBB_concurrent_lru_cache_H