#ifndef __TBB_concurrent_lru_cache_H
#define __TBB_concurrent_lru_cache_H

#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
#error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
#endif

#include "detail/_assert.h"
#include "detail/_aggregator.h"

#include <map>       // for std::map
#include <list>      // for std::list
#include <utility>   // for std::piecewise_construct
#include <tuple>     // for std::make_tuple
#include <algorithm> // for std::find
#include <atomic>    // for std::atomic<bool>

namespace tbb {

namespace detail {
namespace d1 {

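// concurrent_lru_cache maps keys to values that are computed on demand by a
// user-supplied functor, with LRU eviction. A value is pinned for as long as
// at least one handle to it is alive; only unused values sit in the history
// list, and they are evicted once more than my_history_list_capacity of them
// accumulate. All map/list mutations are funneled through an aggregator, so
// they execute one batch at a time on a single thread.
//
// A minimal usage sketch (not part of this header; `decode` and `process`
// are hypothetical user functions):
//
//     #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
//     #include "oneapi/tbb/concurrent_lru_cache.h"
//
//     std::string decode(int id);
//
//     tbb::concurrent_lru_cache<int, std::string> cache(decode, /*cache_capacity=*/8);
//     {
//         auto h = cache[42];  // computes decode(42) or reuses a cached value
//         process(h.value());  // the record stays pinned while `h` is alive
//     }                        // handle destroyed: the value becomes evictable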
template<typename KeyT, typename ValT, typename KeyToValFunctorT = ValT (*) (KeyT)>
class concurrent_lru_cache : no_assign {
private:
    struct handle_object;
    struct storage_map_value_type;

    struct aggregator_operation;
    struct retrieve_aggregator_operation;
    struct signal_end_of_usage_aggregator_operation;

public:
    using key_type = KeyT;
    using value_type = ValT;
    using pointer = ValT*;
    using reference = ValT&;
    using const_pointer = const ValT*;
    using const_reference = const ValT&;

    using value_function_type = KeyToValFunctorT;
    using handle = handle_object;

private:
    using lru_cache_type = concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>;

    using storage_map_type = std::map<key_type, storage_map_value_type>;
    using storage_map_iterator_type = typename storage_map_type::iterator;
    using storage_map_pointer_type = typename storage_map_type::pointer;
    using storage_map_reference_type = typename storage_map_type::reference;

    using history_list_type = std::list<storage_map_iterator_type>;
    using history_list_iterator_type = typename history_list_type::iterator;

    using aggregator_operation_type = aggregator_operation;
    using aggregator_function_type = aggregating_functor<lru_cache_type, aggregator_operation_type>;
    using aggregator_type = aggregator<aggregator_function_type, aggregator_operation_type>;

    friend class aggregating_functor<lru_cache_type, aggregator_operation_type>;

private:
    value_function_type my_value_function;
    aggregator_type my_aggregator;

    storage_map_type my_storage_map;
    history_list_type my_history_list;
    const std::size_t my_history_list_capacity;

public:
    concurrent_lru_cache(value_function_type value_function, std::size_t cache_capacity)
        : my_value_function(value_function), my_history_list_capacity(cache_capacity) {
        my_aggregator.initialize_handler(aggregator_function_type(this));
    }

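    // Returns a handle that pins the value for `key`. The aggregator serializes
    // the lookup; if the record is new, the present thread computes the value
    // and publishes it via my_is_ready, while concurrent requests for the same
    // key spin-wait on that flag.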
    handle operator[](key_type key) {
        retrieve_aggregator_operation op(key);
        my_aggregator.execute(&op);

        if (op.is_new_value_needed()) {
            // this thread inserted the record: compute the value and publish it
            op.result().second.my_value = my_value_function(key);
            op.result().second.my_is_ready.store(true, std::memory_order_release);
        } else {
            // another thread is computing the value: wait until it is published
            spin_wait_while_eq(op.result().second.my_is_ready, false);
        }

        return handle(*this, op.result());
    }

private:

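    // Batch handler invoked by the aggregator: one thread drains the queued
    // operations serially, marking each completed operation's status so the
    // thread that submitted it can resume.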
    void handle_operations(aggregator_operation* op_list) {
        while (op_list) {
            op_list->cast_and_handle(*this);
            aggregator_operation* prev_op = op_list;
            op_list = op_list->next;

            (prev_op->status).store(1, std::memory_order_release);
        }
    }

    void signal_end_of_usage(storage_map_reference_type map_record_ref) {
        signal_end_of_usage_aggregator_operation op(map_record_ref);
        my_aggregator.execute(&op);
    }

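    // Serial part of releasing a handle: decrement the record's reference
    // count; when it reaches zero, move the record to the front of the history
    // list of unused items, evicting the least recently used records from the
    // back if the list is over capacity (capacity 0 erases immediately).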
    void signal_end_of_usage_serial(storage_map_reference_type map_record_ref) {
        storage_map_iterator_type map_it = my_storage_map.find(map_record_ref.first);

        __TBB_ASSERT(map_it != my_storage_map.end(),
            "cache should not return past-end iterators to the outside world");
        __TBB_ASSERT(&(*map_it) == &map_record_ref,
            "dangling reference has been returned to the outside world: data race?");
        __TBB_ASSERT(std::find(my_history_list.begin(), my_history_list.end(), map_it) == my_history_list.end(),
            "object in use should not be in the list of unused objects");

        if (! --(map_it->second.my_ref_counter)) {
            // last reference dropped: the record becomes evictable
            if (my_history_list.size() >= my_history_list_capacity) {
                if (my_history_list_capacity == 0) {
                    // zero capacity: nothing outlives its last handle
                    my_storage_map.erase(map_it);
                    return;
                }
                std::size_t number_of_elements_to_evict = 1 + my_history_list.size() - my_history_list_capacity;

                for (std::size_t i = 0; i < number_of_elements_to_evict; ++i) {
                    storage_map_iterator_type map_it_to_evict = my_history_list.back();

                    __TBB_ASSERT(map_it_to_evict->second.my_ref_counter == 0,
                        "item to be evicted should not have live references");

                    my_history_list.pop_back();
                    my_storage_map.erase(map_it_to_evict);
                }
            }

            // mark the record as most recently used
            my_history_list.push_front(map_it);
            map_it->second.my_history_list_iterator = my_history_list.begin();
        }
    }

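    // Serial part of operator[]: insert a not-yet-ready placeholder record for
    // a missing key (the caller computes the value outside the aggregator), or
    // unlink an existing unused record from the history list, then bump the
    // record's reference count for the new handle.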
    storage_map_reference_type retrieve_serial(key_type key, bool& is_new_value_needed) {
        storage_map_iterator_type map_it = my_storage_map.find(key);

        if (map_it == my_storage_map.end()) {
            map_it = my_storage_map.emplace_hint(
                map_it, std::piecewise_construct,
                std::make_tuple(key), std::make_tuple(value_type(), 0, my_history_list.end(), false));
            is_new_value_needed = true;
        } else {
            history_list_iterator_type list_it = map_it->second.my_history_list_iterator;
            if (list_it != my_history_list.end()) {
                __TBB_ASSERT(map_it->second.my_ref_counter == 0,
                    "an unused item should not have live references");

                // the record is in use again: remove it from the unused list
                my_history_list.erase(list_it);
                map_it->second.my_history_list_iterator = my_history_list.end();
            }
        }

        ++(map_it->second.my_ref_counter);
        return *map_it;
    }
};

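// Record stored in the map: the cached value, the number of live handles
// referring to it, its position in the history list (end() while the record
// is in use), and a flag saying whether the value has been computed yet.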
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::storage_map_value_type {
public:
    using ref_counter_type = std::size_t;

public:
    value_type my_value;
    ref_counter_type my_ref_counter;
    history_list_iterator_type my_history_list_iterator;
    std::atomic<bool> my_is_ready;

public:
    storage_map_value_type(
        value_type const& value, ref_counter_type ref_counter,
        history_list_iterator_type history_list_iterator, bool is_ready)
        : my_value(value), my_ref_counter(ref_counter),
          my_history_list_iterator(history_list_iterator), my_is_ready(is_ready) {}
};

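// Move-only RAII handle returned by operator[]: while it is alive the record
// stays pinned; its destructor (or move-assignment over it) signals end of
// usage back to the owning cache.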
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::handle_object {
private:
    lru_cache_type* my_lru_cache_ptr;
    storage_map_pointer_type my_map_record_ptr;

public:
    handle_object()
        : my_lru_cache_ptr(nullptr), my_map_record_ptr(nullptr) {}
    handle_object(lru_cache_type& lru_cache_ref, storage_map_reference_type map_record_ref)
        : my_lru_cache_ptr(&lru_cache_ref), my_map_record_ptr(&map_record_ref) {}

    handle_object(handle_object&) = delete;
    void operator=(handle_object&) = delete;

    handle_object(handle_object&& other)
        : my_lru_cache_ptr(other.my_lru_cache_ptr), my_map_record_ptr(other.my_map_record_ptr) {

        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moving object?");

        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;
    }

    handle_object& operator=(handle_object&& other) {
        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moving object?");

        // release the record currently held (if any) before taking over other's
        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);

        my_lru_cache_ptr = other.my_lru_cache_ptr;
        my_map_record_ptr = other.my_map_record_ptr;
        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;

        return *this;
    }

    ~handle_object() {
        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);
    }

    operator bool() const {
        return (my_lru_cache_ptr && my_map_record_ptr);
    }

    value_type& value() {
        __TBB_ASSERT(my_lru_cache_ptr, "get value from an already moved-from object?");
        __TBB_ASSERT(my_map_record_ptr, "get value from an invalid or already moved-from object?");

        return my_map_record_ptr->second.my_value;
    }
};

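// Tagged base class for the two operation kinds queued on the aggregator;
// cast_and_handle() dispatches on the stored tag instead of a virtual call.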
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::aggregator_operation
    : aggregated_operation<aggregator_operation> {
public:
    enum class op_type { retrieve, signal_end_of_usage };

private:
    op_type my_op;

public:
    aggregator_operation(op_type op) : my_op(op) {}

    void cast_and_handle(lru_cache_type& lru_cache_ref) {
        if (my_op == op_type::retrieve)
            static_cast<retrieve_aggregator_operation*>(this)->handle(lru_cache_ref);
        else
            static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(lru_cache_ref);
    }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::retrieve_aggregator_operation
    : aggregator_operation, private no_assign {
public:
    key_type my_key;
    storage_map_pointer_type my_map_record_ptr;
    bool my_is_new_value_needed;

public:
    retrieve_aggregator_operation(key_type key)
        : aggregator_operation(aggregator_operation::op_type::retrieve),
          my_key(key), my_map_record_ptr(nullptr), my_is_new_value_needed(false) {}

    void handle(lru_cache_type& lru_cache_ref) {
        my_map_record_ptr = &lru_cache_ref.retrieve_serial(my_key, my_is_new_value_needed);
    }

    storage_map_reference_type result() {
        __TBB_ASSERT(my_map_record_ptr, "Attempt to call result() before calling handle()");
        return *my_map_record_ptr;
    }

    bool is_new_value_needed() { return my_is_new_value_needed; }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::signal_end_of_usage_aggregator_operation
    : aggregator_operation, private no_assign {
private:
    storage_map_reference_type my_map_record_ref;

public:
    signal_end_of_usage_aggregator_operation(storage_map_reference_type map_record_ref)
        : aggregator_operation(aggregator_operation::op_type::signal_end_of_usage),
          my_map_record_ref(map_record_ref) {}

    void handle(lru_cache_type& lru_cache_ref) {
        lru_cache_ref.signal_end_of_usage_serial(my_map_record_ref);
    }
};

} // namespace d1
} // namespace detail

inline namespace v1 {

using detail::d1::concurrent_lru_cache;

} // inline namespace v1
} // namespace tbb

#endif // __TBB_concurrent_lru_cache_H