//
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <cassert>
#include <cstring>

#include "absl/base/optimization.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {

// Align 'x' up to the nearest 'align' bytes.
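// For example, AlignUp(13, 8) == 16 and AlignUp(16, 8) == 16.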
inline constexpr size_t AlignUp(size_t x, size_t align) {
  return align * ((x + align - 1) / align);
}

// A SequenceLock implements lock-free reads. A sequence counter is incremented
// before and after each write, and readers access the counter before and after
// accessing the protected data. If the counter is verified to not change during
// the access, and the sequence counter value was even, then the reader knows
// that the read was race-free and valid. Otherwise, the reader must fall back
// to a Mutex-based code path.
//
// This particular SequenceLock starts in an "uninitialized" state in which
// TryRead() returns false. It must be enabled by calling MarkInitialized().
// This serves as a marker that the associated flag value has not yet been
// initialized and a slow path needs to be taken.
//
// The memory reads and writes protected by this lock must use the provided
// `TryRead()` and `Write()` functions. These functions behave similarly to
// `memcpy()`, with one oddity: the protected data must be an array of
// `std::atomic<uint64_t>`. This is to comply with the C++ standard, which
// considers data races on non-atomic objects to be undefined behavior. See "Can
// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
// Boehm for more details.
//
// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
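//
// Example: a minimal usage sketch (illustrative only; `T`, `data`, `mu`, and
// `slow_value` are hypothetical names, not part of this API):
//
//   // The protected storage must be an array of std::atomic<uint64_t> large
//   // enough to hold a T; AlignUp() above gives the rounded-up byte size.
//   std::atomic<uint64_t> data[AlignUp(sizeof(T), sizeof(uint64_t)) /
//                              sizeof(uint64_t)];
//   SequenceLock seq_lock;
//   seq_lock.MarkInitialized();
//
//   // Writer (all calls to Write() must be externally synchronized, e.g. by
//   // holding `mu`):
//   T value = ...;
//   seq_lock.Write(data, &value, sizeof(value));
//
//   // Reader: try the lock-free fast path; fall back to a mutex-protected
//   // copy (`slow_value`) if the read raced with a writer or the lock is
//   // still uninitialized.
//   T out;
//   if (!seq_lock.TryRead(&out, data, sizeof(out))) {
//     absl::MutexLock l(&mu);
//     out = slow_value;
//   }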
class SequenceLock {
 public:
  constexpr SequenceLock() : lock_(kUninitialized) {}

  // Mark that this lock is ready for use.
  void MarkInitialized() {
    assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
    lock_.store(0, std::memory_order_release);
  }

  // Copy "size" bytes of data from "src" to "dst", protected as a read-side
  // critical section of the sequence lock.
  //
  // Unlike traditional sequence lock implementations which loop until getting a
  // clean read, this implementation returns false in the case of concurrent
  // calls to `Write`. In such a case, the caller should fall back to a
  // locking-based slow path.
  //
  // Returns false if the sequence lock was not yet marked as initialized.
  //
  // NOTE: If this returns false, "dst" may be overwritten with undefined
  // (potentially uninitialized) data.
  bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
    // The acquire load ensures that no loads done by RelaxedCopyFromAtomic()
    // are reordered above this first load of the sequence counter.
    int64_t seq_before = lock_.load(std::memory_order_acquire);
    if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
    RelaxedCopyFromAtomic(dst, src, size);
    // Another acquire fence ensures that the load of 'lock_' below is
    // strictly ordered after the RelaxedCopyFromAtomic() call above.
    std::atomic_thread_fence(std::memory_order_acquire);
    int64_t seq_after = lock_.load(std::memory_order_relaxed);
    return ABSL_PREDICT_TRUE(seq_before == seq_after);
  }

  // Copy "size" bytes from "src" to "dst" as a write-side critical section
  // of the sequence lock. Any concurrent readers will be forced to retry
  // until they get a read that does not conflict with this write.
  //
  // This call must be externally synchronized against other calls to Write,
  // but may proceed concurrently with reads.
  void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
    // We can use relaxed instructions to increment the counter since we
    // are externally synchronized. The std::atomic_thread_fence below
    // ensures that the counter updates don't get interleaved with the
    // copy to the data.
    int64_t orig_seq = lock_.load(std::memory_order_relaxed);
    assert((orig_seq & 1) == 0);  // Must be initially unlocked.
    lock_.store(orig_seq + 1, std::memory_order_relaxed);

    // We put a release fence between the update to lock_ and the writes to the
    // shared data. Thus all stores to the shared data are effectively release
    // operations, and the update to lock_ above cannot be reordered past any of
    // them. Note that this fence is not for the counter store above; a release
    // barrier for that store would be placed before it, not after.
    std::atomic_thread_fence(std::memory_order_release);
    RelaxedCopyToAtomic(dst, src, size);
    // "Release" semantics ensure that none of the writes done by
    // RelaxedCopyToAtomic() can be reordered after the following modification.
    lock_.store(orig_seq + 2, std::memory_order_release);
  }

  // Return the number of times that Write() has been called.
  //
  // REQUIRES: This must be externally synchronized against concurrent calls to
  // `Write()` or `IncrementModificationCount()`.
  // REQUIRES: `MarkInitialized()` must have been previously called.
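  //
  // For example, after MarkInitialized() and two calls to Write(), lock_
  // holds the (even) value 4, so this returns 2.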
  int64_t ModificationCount() const {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized && (val & 1) == 0);
    return val / 2;
  }

  // REQUIRES: This must be externally synchronized against concurrent calls to
  // `Write()` or `ModificationCount()`.
  // REQUIRES: `MarkInitialized()` must have been previously called.
  void IncrementModificationCount() {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized);
    lock_.store(val + 2, std::memory_order_relaxed);
  }

 private:
  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
  // atomics.
  static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
                                    size_t size) {
    char* dst_byte = static_cast<char*>(dst);
    while (size >= sizeof(uint64_t)) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, sizeof(word));
      dst_byte += sizeof(word);
      src++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, size);
    }
  }

  // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
  // atomics.
  static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
                                  size_t size) {
    const char* src_byte = static_cast<const char*>(src);
    while (size >= sizeof(uint64_t)) {
      uint64_t word;
      std::memcpy(&word, src_byte, sizeof(word));
      dst->store(word, std::memory_order_relaxed);
      src_byte += sizeof(word);
      dst++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = 0;
      std::memcpy(&word, src_byte, size);
      dst->store(word, std::memory_order_relaxed);
    }
  }

  static constexpr int64_t kUninitialized = -1;
  std::atomic<int64_t> lock_;
};

}  // namespace flags_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_