// Copyright 2022 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
#define ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_

#ifdef _MSC_VER
#include <intrin.h>
#endif

#if defined(__SSE__) || defined(__AVX__)
// Pulls in both SSE and AVX intrinsics.
#include <immintrin.h>
#endif

#ifdef __aarch64__
#include "absl/crc/internal/non_temporal_arm_intrinsics.h"
#endif

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace crc_internal {

// This non-temporal memcpy uses regular loads and non-temporal stores. It
// works with both 16-byte aligned and unaligned addresses. If data at the
// destination is not immediately accessed, using non-temporal memcpy can
// save 1 DRAM load of the destination cacheline.
constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;
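// ABSL_CACHELINE_SIZE is 64 on the x86 and aarch64 builds this header is used
// on, hence the 64-byte cacheline boundaries mentioned in the comments below.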

// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
// at compile time.
inline void *non_temporal_store_memcpy(void *__restrict dst,
                                       const void *__restrict src, size_t len) {
#if defined(__SSE3__) || defined(__aarch64__) || \
    (defined(_MSC_VER) && defined(__AVX__))
  // This implementation requires SSE3.
  // MSVC cannot target SSE3 directly, but when MSVC targets AVX,
  // SSE3 support is implied.
  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);

  // memcpy() the misaligned header. At the end of this if block, <d> is
  // aligned to a 64-byte cacheline boundary or <len> == 0.
  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
    uintptr_t bytes_before_alignment_boundary =
        kCacheLineSize -
        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
    assert(bytes_before_alignment_boundary < kCacheLineSize);
    memcpy(d, s, header_len);
    d += header_len;
    s += header_len;
    len -= header_len;
  }

  if (len >= kCacheLineSize) {
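    // The streaming stores below (_mm_stream_si128) are weakly ordered, so we
    // fence before and after the streaming loop to order them relative to the
    // ordinary stores around it. <d> is now cacheline aligned, so the 16-byte
    // streaming stores satisfy their alignment requirement; <s> may still be
    // unaligned, hence the unaligned _mm_lddqu_si128 loads.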
    _mm_sfence();
    __m128i *dst_cacheline = reinterpret_cast<__m128i *>(d);
    const __m128i *src_cacheline = reinterpret_cast<const __m128i *>(s);
    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m128i);
    size_t loops = len / kCacheLineSize;

    while (len >= kCacheLineSize) {
      __m128i temp1, temp2, temp3, temp4;
      temp1 = _mm_lddqu_si128(src_cacheline + 0);
      temp2 = _mm_lddqu_si128(src_cacheline + 1);
      temp3 = _mm_lddqu_si128(src_cacheline + 2);
      temp4 = _mm_lddqu_si128(src_cacheline + 3);
      _mm_stream_si128(dst_cacheline + 0, temp1);
      _mm_stream_si128(dst_cacheline + 1, temp2);
      _mm_stream_si128(dst_cacheline + 2, temp3);
      _mm_stream_si128(dst_cacheline + 3, temp4);
      src_cacheline += kOpsPerCacheLine;
      dst_cacheline += kOpsPerCacheLine;
      len -= kCacheLineSize;
    }
    d += loops * kCacheLineSize;
    s += loops * kCacheLineSize;
    _mm_sfence();
  }

  // memcpy the tail.
  if (len) {
    memcpy(d, s, len);
  }
  return dst;
#else
  // Fallback to regular memcpy.
  return memcpy(dst, src, len);
#endif  // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
}

// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
// at compile time.
#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::target) && \
    (defined(__x86_64__) || defined(__i386__))
[[gnu::target("avx")]]
#endif
inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
                                           const void *__restrict src,
                                           size_t len) {
  // This function requires AVX. For clang and gcc we compile it with AVX even
  // if the translation unit isn't built with AVX support. This works because we
  // only select this implementation at runtime if the CPU supports AVX.
#if defined(__SSE3__) || (defined(_MSC_VER) && defined(__AVX__))
  uint8_t *d = reinterpret_cast<uint8_t *>(dst);
  const uint8_t *s = reinterpret_cast<const uint8_t *>(src);

  // memcpy() the misaligned header. At the end of this if block, <d> is
  // aligned to a 64-byte cacheline boundary or <len> == 0.
  if (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1)) {
    uintptr_t bytes_before_alignment_boundary =
        kCacheLineSize -
        (reinterpret_cast<uintptr_t>(d) & (kCacheLineSize - 1));
    size_t header_len = (std::min)(bytes_before_alignment_boundary, len);
    assert(bytes_before_alignment_boundary < kCacheLineSize);
    memcpy(d, s, header_len);
    d += header_len;
    s += header_len;
    len -= header_len;
  }

  if (len >= kCacheLineSize) {
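    // Same structure as the SSE path above, but with 32-byte AVX registers:
    // two _mm256_stream_si256 stores cover one 64-byte cacheline.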
    _mm_sfence();
    __m256i *dst_cacheline = reinterpret_cast<__m256i *>(d);
    const __m256i *src_cacheline = reinterpret_cast<const __m256i *>(s);
    constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m256i);
    size_t loops = len / kCacheLineSize;

    while (len >= kCacheLineSize) {
      __m256i temp1, temp2;
      temp1 = _mm256_lddqu_si256(src_cacheline + 0);
      temp2 = _mm256_lddqu_si256(src_cacheline + 1);
      _mm256_stream_si256(dst_cacheline + 0, temp1);
      _mm256_stream_si256(dst_cacheline + 1, temp2);
      src_cacheline += kOpsPerCacheLine;
      dst_cacheline += kOpsPerCacheLine;
      len -= kCacheLineSize;
    }
    d += loops * kCacheLineSize;
    s += loops * kCacheLineSize;
    _mm_sfence();
  }

  // memcpy the tail.
  if (len) {
    memcpy(d, s, len);
  }
  return dst;
#else
  return memcpy(dst, src, len);
#endif  // __SSE3__ || (_MSC_VER && __AVX__)
}

}  // namespace crc_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_
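
A minimal usage sketch (not part of the header above, and assuming a GCC/Clang x86 build): since non_temporal_store_memcpy_avx may only be called on CPUs that actually have AVX, a caller can guard it with __builtin_cpu_supports at runtime. The wrapper name CopyWithoutPollutingCache is illustrative; Abseil's own runtime dispatch for its CRC memcpy engines lives elsewhere and is more involved.

#include <cstddef>

#include "absl/crc/internal/non_temporal_memcpy.h"

void *CopyWithoutPollutingCache(void *dst, const void *src, size_t len) {
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
  // Only call the AVX variant when the CPU supports AVX, because that
  // function is compiled with the AVX target attribute.
  if (__builtin_cpu_supports("avx")) {
    return absl::crc_internal::non_temporal_store_memcpy_avx(dst, src, len);
  }
#endif
  // SSE3/AArch64 path, or plain memcpy if neither was available at
  // compile time.
  return absl::crc_internal::non_temporal_store_memcpy(dst, src, len);
}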