/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_gcc_ppc_common.hpp
 *
 * This header contains basic utilities for the gcc PowerPC backend.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

// The implementation below uses information from this document:
// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html

// A note about memory_order_consume. Technically, this architecture makes it possible to
// omit the memory barrier after a consume load, since it supports data dependency ordering.
// However, some compiler optimizations may break seemingly valid code that relies on data
// dependency tracking by injecting bogus branches to aid out-of-order execution.
// This may happen not only in Boost.Atomic code but also in the user's code, which we have
// no control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.

struct core_arch_operations_gcc_ppc_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    // Barrier emitted before a store or read-modify-write operation. Boost's memory_order
    // values are bit masks, so a bitwise AND is enough to test for the release component.
    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
    {
#if defined(__powerpc64__) || defined(__PPC64__)
        if (order == memory_order_seq_cst)
            __asm__ __volatile__ ("sync" ::: "memory");
        else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __asm__ __volatile__ ("lwsync" ::: "memory");
#else
        // lwsync may not be available on older 32-bit PowerPC cores, so a full sync is used.
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __asm__ __volatile__ ("sync" ::: "memory");
#endif
    }

    // Barrier emitted after a load or read-modify-write operation with acquire semantics;
    // memory_order_consume takes the same isync path (see the note above).
    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
            __asm__ __volatile__ ("isync" ::: "memory");
    }
};
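
// Illustrative sketch (added for exposition, not part of the original header): roughly how
// a derived PowerPC backend brackets an LL/SC loop with the fence helpers above. The name
// example_exchange and the use of unsigned int as the storage type are assumptions made for
// this example; the real size-specific operations live in the ops_gcc_ppc headers.
BOOST_FORCEINLINE unsigned int example_exchange(unsigned int volatile& storage, unsigned int v, memory_order order) BOOST_NOEXCEPT
{
    unsigned int original;
    core_arch_operations_gcc_ppc_base::fence_before(order); // sync/lwsync for release-class orders
    __asm__ __volatile__
    (
        "1:\n\t"
        "lwarx %0,%y1\n\t"   // load the current word and take a reservation
        "stwcx. %2,%y1\n\t"  // store the new word if the reservation still holds
        "bne- 1b\n\t"        // reservation lost: retry the loop
        : "=&b" (original), "+Z" (storage)
        : "b" (v)
        : "cr0"
    );
    core_arch_operations_gcc_ppc_base::fence_after(order);  // isync for acquire/consume orders
    return original;
}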

} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_