EIC code displayed by LXR

0001 
0002 // Copyright Christopher Kormanyos 2002 - 2011.
0003 // Copyright 2011 John Maddock.
0004 // Distributed under the Boost Software License, Version 1.0.
0005 //    (See accompanying file LICENSE_1_0.txt or copy at
0006 //          http://www.boost.org/LICENSE_1_0.txt)
0007 
0008 // This work is based on an earlier work:
0009 // "Algorithm 910: A Portable C++ Multiple-Precision System for Special-Function Calculations",
0010 // in ACM TOMS, {VOL 37, ISSUE 4, (February 2011)} (C) ACM, 2011. http://doi.acm.org/10.1145/1916461.1916469
0011 //
0012 // This file has no include guards or namespaces - it's expanded inline inside default_ops.hpp
0013 //
0014 
0015 #include <boost/multiprecision/detail/standalone_config.hpp>
0016 #include <boost/multiprecision/detail/no_exceptions_support.hpp>
0017 #include <boost/multiprecision/detail/assert.hpp>
0018 
0019 #ifdef BOOST_MSVC
0020 #pragma warning(push)
0021 #pragma warning(disable : 6326) // comparison of two constants
0022 #pragma warning(disable : 4127) // conditional expression is constant
0023 #endif
0024 
0025 template <class T>
0026 void hyp0F1(T& result, const T& b, const T& x)
0027 {
0028    using si_type = typename boost::multiprecision::detail::canonical<std::int32_t, T>::type ;
0029    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0030 
0031    // Compute the series representation of Hypergeometric0F1 taken from
0032    // http://functions.wolfram.com/HypergeometricFunctions/Hypergeometric0F1/06/01/01/
0033    // There are no checks on input range or parameter boundaries.
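   // (Editorial note: the series being summed is 0F1(; b; x) = sum_{n>=0} x^n / ((b)_n * n!),
   //  where (b)_n = b*(b+1)*...*(b+n-1) is the Pochhammer symbol.  Below,
   //  x_pow_n_div_n_fact holds x^n/n! and pochham_b holds (b)_n.)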
0034 
0035    T x_pow_n_div_n_fact(x);
0036    T pochham_b(b);
0037    T bp(b);
0038 
0039    eval_divide(result, x_pow_n_div_n_fact, pochham_b);
0040    eval_add(result, ui_type(1));
0041 
0042    si_type n;
0043 
0044    T tol;
0045    tol = ui_type(1);
0046    eval_ldexp(tol, tol, 1 - boost::multiprecision::detail::digits2<number<T, et_on> >::value());
0047    eval_multiply(tol, result);
0048    if (eval_get_sign(tol) < 0)
0049       tol.negate();
0050    T term;
0051 
0052    const int series_limit =
0053        boost::multiprecision::detail::digits2<number<T, et_on> >::value() < 100
0054            ? 100
0055            : boost::multiprecision::detail::digits2<number<T, et_on> >::value();
0056    // Series expansion of hyperg_0f1(; b; x).
0057    for (n = 2; n < series_limit; ++n)
0058    {
0059       eval_multiply(x_pow_n_div_n_fact, x);
0060       eval_divide(x_pow_n_div_n_fact, n);
0061       eval_increment(bp);
0062       eval_multiply(pochham_b, bp);
0063 
0064       eval_divide(term, x_pow_n_div_n_fact, pochham_b);
0065       eval_add(result, term);
0066 
0067       bool neg_term = eval_get_sign(term) < 0;
0068       if (neg_term)
0069          term.negate();
0070       if (term.compare(tol) <= 0)
0071          break;
0072    }
0073 
0074    if (n >= series_limit)
0075       BOOST_MP_THROW_EXCEPTION(std::runtime_error("H0F1 Failed to Converge"));
0076 }
0077 
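// (Editorial sketch, not part of the original source: the same series and stopping
//  rule written out in plain double precision.  The name hyp0F1_sketch and the use
//  of machine epsilon as the tolerance are ours, for illustration only.)
//
//    #include <cmath>
//    #include <limits>
//
//    double hyp0F1_sketch(double b, double x)
//    {
//       double x_pow_n_div_n_fact = x;   // x^n / n!, starting at n = 1
//       double pochham_b          = b;   // (b)_n,    starting at n = 1
//       double bp                 = b;
//       double result             = 1.0 + x_pow_n_div_n_fact / pochham_b;
//       const double tol          = std::fabs(result) * std::numeric_limits<double>::epsilon();
//       for (int n = 2; n < 100; ++n)
//       {
//          x_pow_n_div_n_fact *= x / n;  // advance x^n / n!
//          pochham_b          *= ++bp;   // advance (b)_n
//          const double term   = x_pow_n_div_n_fact / pochham_b;
//          result += term;
//          if (std::fabs(term) <= tol)
//             break;
//       }
//       return result;
//    }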
0078 template <class T, unsigned N, bool b = boost::multiprecision::detail::is_variable_precision<boost::multiprecision::number<T> >::value>
0079 struct scoped_N_precision
0080 {
0081    template <class U>
0082    scoped_N_precision(U const&) {}
0083    template <class U>
0084    void reduce(U&) {}
0085 };
0086 
0087 template <class T, unsigned N>
0088 struct scoped_N_precision<T, N, true>
0089 {
0090    unsigned old_precision, old_arg_precision;
0091    scoped_N_precision(T& arg)
0092    {
0093       old_precision     = T::thread_default_precision();
0094       old_arg_precision = arg.precision();
0095       T::thread_default_precision(old_arg_precision * N);
0096       arg.precision(old_arg_precision * N);
0097    }
0098    ~scoped_N_precision()
0099    {
0100       T::thread_default_precision(old_precision);
0101    }
0102    void reduce(T& arg) 
0103    {
0104       arg.precision(old_arg_precision);
0105    }
0106 };
0107 
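// (Editorial note: the primary template of scoped_N_precision above is a
//  deliberate no-op for fixed precision backends.  The specialization, selected
//  when is_variable_precision is true, temporarily multiplies both the thread
//  default precision and the argument's precision by N, restoring the old
//  default in its destructor; reduce() drops an individual variable back to its
//  original precision.  reduce_n_half_pi below is its only user in this file.)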
0108 template <class T>
0109 void reduce_n_half_pi(T& arg, const T& n, bool go_down)
0110 {
0111    //
0112    // We need to perform argument reduction at 3 times the precision of arg
0113    // in order to ensure a correct result up to arg = 1/epsilon.  Beyond that
0114    // the value of n will have been incorrectly calculated anyway since it will
0115    // have a value greater than 1/epsilon and no longer be an exact integer value.
0116    //
0117    // More information in ARGUMENT REDUCTION FOR HUGE ARGUMENTS. K C Ng.
0118    //
0119    // There are two mutually exclusive ways to achieve this, both of which are 
0120    // supported here:
0121    // 1) To define a fixed precision type with 3 times the precision for the calculation.
0122    // 2) To dynamically increase the precision of the variables.
0123    //
0124    using reduction_type = typename boost::multiprecision::detail::transcendental_reduction_type<T>::type;
0125    //
0126    // Make a copy of the arg at higher precision:
0127    //
0128    reduction_type big_arg(arg);
0129    //
0130    // Dynamically increase precision when supported; this increases the default
0131    // precision and ups the precision of big_arg to match:
0132    //
0133    scoped_N_precision<T, 3> scoped_precision(big_arg);
0134    //
0135    // High precision PI:
0136    //
0137    reduction_type reduction = get_constant_pi<reduction_type>();
0138    eval_ldexp(reduction, reduction, -1); // divide by 2
0139    eval_multiply(reduction, n);
0140 
0141    BOOST_MATH_INSTRUMENT_CODE(big_arg.str(10, std::ios_base::scientific));
0142    BOOST_MATH_INSTRUMENT_CODE(reduction.str(10, std::ios_base::scientific));
0143 
0144    if (go_down)
0145       eval_subtract(big_arg, reduction, big_arg);
0146    else
0147       eval_subtract(big_arg, reduction);
0148    arg = T(big_arg);
0149    //
0150    // If arg is a variable precision type, then we have just copied the
0151    // precision of big_arg as well as its value.  Reduce the precision now:
0152    //
0153    scoped_precision.reduce(arg);
0154    BOOST_MATH_INSTRUMENT_CODE(big_arg.str(10, std::ios_base::scientific));
0155    BOOST_MATH_INSTRUMENT_CODE(arg.str(10, std::ios_base::scientific));
0156 }
0157 
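// (Editorial note on why ~3x precision is needed above: the subtraction
//  arg - n*(pi/2) cancels roughly as many leading digits as n has, since the
//  remainder lies in [0, pi/2).  For example, at 50 working digits an argument
//  near 1e40 loses about 40 digits to cancellation, leaving only ~10 correct
//  digits if the subtraction is done at working precision; carrying it out at
//  3x the digits preserves a full working-precision remainder for all arguments
//  up to about 1/epsilon.  See the K. C. Ng paper referenced above.)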
0158 template <class T>
0159 void eval_sin(T& result, const T& x)
0160 {
0161    static_assert(number_category<T>::value == number_kind_floating_point, "The sin function is only valid for floating point types.");
0162    BOOST_MATH_INSTRUMENT_CODE(x.str(0, std::ios_base::scientific));
0163    if (&result == &x)
0164    {
0165       T temp;
0166       eval_sin(temp, x);
0167       result = temp;
0168       return;
0169    }
0170 
0171    using si_type = typename boost::multiprecision::detail::canonical<std::int32_t, T>::type ;
0172    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0173    using fp_type = typename std::tuple_element<0, typename T::float_types>::type                         ;
0174 
0175    switch (eval_fpclassify(x))
0176    {
0177    case FP_INFINITE:
0178    case FP_NAN:
0179       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0180       {
0181          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0182          errno  = EDOM;
0183       }
0184       else
0185          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0186       return;
0187    case FP_ZERO:
0188       result = x;
0189       return;
0190    default:;
0191    }
0192 
0193    // Local copy of the argument
0194    T xx = x;
0195 
0196    // Analyze and prepare the phase of the argument.
0197    // Make a local, positive copy of the argument, xx.
0198    // The argument xx will be reduced to 0 <= xx <= pi/2.
0199    bool b_negate_sin = false;
0200 
0201    if (eval_get_sign(x) < 0)
0202    {
0203       xx.negate();
0204       b_negate_sin = !b_negate_sin;
0205    }
0206 
0207    T n_pi, t;
0208    T half_pi = get_constant_pi<T>();
0209    eval_ldexp(half_pi, half_pi, -1); // divide by 2
0210    // Remove multiples of pi/2.
0211    if (xx.compare(half_pi) > 0)
0212    {
0213       eval_divide(n_pi, xx, half_pi);
0214       eval_trunc(n_pi, n_pi);
0215       t = ui_type(4);
0216       eval_fmod(t, n_pi, t);
0217       bool b_go_down = false;
0218       if (t.compare(ui_type(1)) == 0)
0219       {
0220          b_go_down = true;
0221       }
0222       else if (t.compare(ui_type(2)) == 0)
0223       {
0224          b_negate_sin = !b_negate_sin;
0225       }
0226       else if (t.compare(ui_type(3)) == 0)
0227       {
0228          b_negate_sin = !b_negate_sin;
0229          b_go_down    = true;
0230       }
0231 
0232       if (b_go_down)
0233          eval_increment(n_pi);
0234       //
0235       // If n_pi is > 1/epsilon, then it is no longer an exact integer value
0236       // but an approximation.  As a result we can no longer reliably reduce
0237       // xx to 0 <= xx < pi/2, nor can we tell the sign of the result as we need
0238       // n_pi % 4 for that, but that will always be zero in this situation.
0239       // We could use a higher precision type for n_pi, along with division at
0240       // higher precision, but that's rather expensive.  So for now we do not support
0241       // this, and will see if anyone complains and has a legitimate use case.
0242       //
0243       if (n_pi.compare(get_constant_one_over_epsilon<T>()) > 0)
0244       {
0245          result = ui_type(0);
0246          return;
0247       }
0248 
0249       reduce_n_half_pi(xx, n_pi, b_go_down);
0250       //
0251       // Post reduction we may be a few ulp below zero or above pi/2
0252       // given that n_pi was calculated at working precision and not
0253       // at the higher precision used for reduction.  Correct that now:
0254       //
0255       if (eval_get_sign(xx) < 0)
0256       {
0257          xx.negate();
0258          b_negate_sin = !b_negate_sin;
0259       }
0260       if (xx.compare(half_pi) > 0)
0261       {
0262          eval_ldexp(half_pi, half_pi, 1);
0263          eval_subtract(xx, half_pi, xx);
0264          eval_ldexp(half_pi, half_pi, -1);
0265          b_go_down = !b_go_down;
0266       }
0267 
0268       BOOST_MATH_INSTRUMENT_CODE(xx.str(0, std::ios_base::scientific));
0269       BOOST_MATH_INSTRUMENT_CODE(n_pi.str(0, std::ios_base::scientific));
0270       BOOST_MP_ASSERT(xx.compare(half_pi) <= 0);
0271       BOOST_MP_ASSERT(xx.compare(ui_type(0)) >= 0);
0272    }
0273 
0274    t = half_pi;
0275    eval_subtract(t, xx);
0276 
0277    const bool b_zero    = eval_get_sign(xx) == 0;
0278    const bool b_pi_half = eval_get_sign(t) == 0;
0279 
0280    BOOST_MATH_INSTRUMENT_CODE(xx.str(0, std::ios_base::scientific));
0281    BOOST_MATH_INSTRUMENT_CODE(t.str(0, std::ios_base::scientific));
0282 
0283    // Check if the reduced argument is very close to 0 or pi/2.
0284    const bool b_near_zero    = xx.compare(fp_type(1e-1)) < 0;
0285    const bool b_near_pi_half = t.compare(fp_type(1e-1)) < 0;
0286 
0287    if (b_zero)
0288    {
0289       result = ui_type(0);
0290    }
0291    else if (b_pi_half)
0292    {
0293       result = ui_type(1);
0294    }
0295    else if (b_near_zero)
0296    {
0297       eval_multiply(t, xx, xx);
0298       eval_divide(t, si_type(-4));
0299       T t2;
0300       t2 = fp_type(1.5);
0301       hyp0F1(result, t2, t);
0302       BOOST_MATH_INSTRUMENT_CODE(result.str(0, std::ios_base::scientific));
0303       eval_multiply(result, xx);
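      // (Editorial note: this branch uses the identity sin(x) = x * 0F1(; 3/2; -x^2/4).)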
0304    }
0305    else if (b_near_pi_half)
0306    {
0307       eval_multiply(t, t);
0308       eval_divide(t, si_type(-4));
0309       T t2;
0310       t2 = fp_type(0.5);
0311       hyp0F1(result, t2, t);
0312       BOOST_MATH_INSTRUMENT_CODE(result.str(0, std::ios_base::scientific));
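      // (Editorial note: this branch uses cos(t) = 0F1(; 1/2; -t^2/4) with
      //  t = pi/2 - x, since sin(x) = cos(pi/2 - x).)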
0313    }
0314    else
0315    {
0316       // Scale to a small argument for an efficient Taylor series,
0317       // implemented as a hypergeometric function. Divide the argument by
0318       // three a fixed number of times and convert back afterwards with the
0319       // triple-angle identity. Here the argument is divided by 3^9 (19683 = 3^9).
0320 
0321       constexpr si_type n_scale           = 9;
0322       constexpr si_type n_three_pow_scale = static_cast<si_type>(19683L);
0323 
0324       eval_divide(xx, n_three_pow_scale);
0325 
0326       // Now with small arguments, we are ready for a series expansion.
0327       eval_multiply(t, xx, xx);
0328       eval_divide(t, si_type(-4));
0329       T t2;
0330       t2 = fp_type(1.5);
0331       hyp0F1(result, t2, t);
0332       BOOST_MATH_INSTRUMENT_CODE(result.str(0, std::ios_base::scientific));
0333       eval_multiply(result, xx);
0334 
0335       // Convert back using multiple angle identity.
0336       for (std::int32_t k = static_cast<std::int32_t>(0); k < n_scale; k++)
0337       {
0338          // Rescale the sine value using the triple-angle identity sin(3x) = 3*sin(x) - 4*sin(x)^3.
0339          eval_multiply(t2, result, ui_type(3));
0340          eval_multiply(t, result, result);
0341          eval_multiply(t, result);
0342          eval_multiply(t, ui_type(4));
0343          eval_subtract(result, t2, t);
0344       }
0345    }
0346 
0347    if (b_negate_sin)
0348       result.negate();
0349    BOOST_MATH_INSTRUMENT_CODE(result.str(0, std::ios_base::scientific));
0350 }
0351 
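// (Editorial usage sketch, not part of this file: these eval_* backend routines
//  are normally reached through the free functions on boost::multiprecision::number<>,
//  e.g. sin() on a cpp_dec_float_50 forwards to eval_sin on the backend.)
//
//    #include <boost/multiprecision/cpp_dec_float.hpp>
//    #include <iomanip>
//    #include <iostream>
//    #include <limits>
//
//    int main()
//    {
//       using boost::multiprecision::cpp_dec_float_50;
//       const cpp_dec_float_50 x = cpp_dec_float_50(1) / 3;
//       std::cout << std::setprecision(std::numeric_limits<cpp_dec_float_50>::digits10)
//                 << sin(x) << std::endl;   // dispatches to eval_sin above
//    }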
0352 template <class T>
0353 void eval_cos(T& result, const T& x)
0354 {
0355    static_assert(number_category<T>::value == number_kind_floating_point, "The cos function is only valid for floating point types.");
0356    if (&result == &x)
0357    {
0358       T temp;
0359       eval_cos(temp, x);
0360       result = temp;
0361       return;
0362    }
0363 
0364    using si_type = typename boost::multiprecision::detail::canonical<std::int32_t, T>::type ;
0365    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0366 
0367    switch (eval_fpclassify(x))
0368    {
0369    case FP_INFINITE:
0370    case FP_NAN:
0371       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0372       {
0373          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0374          errno  = EDOM;
0375       }
0376       else
0377          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0378       return;
0379    case FP_ZERO:
0380       result = ui_type(1);
0381       return;
0382    default:;
0383    }
0384 
0385    // Local copy of the argument
0386    T xx = x;
0387 
0388    // Analyze and prepare the phase of the argument.
0389    // Make a local, positive copy of the argument, xx.
0390    // The argument xx will be reduced to 0 <= xx <= pi/2.
0391    bool b_negate_cos = false;
0392 
0393    if (eval_get_sign(x) < 0)
0394    {
0395       xx.negate();
0396    }
0397    BOOST_MATH_INSTRUMENT_CODE(xx.str(0, std::ios_base::scientific));
0398 
0399    T n_pi, t;
0400    T half_pi = get_constant_pi<T>();
0401    eval_ldexp(half_pi, half_pi, -1); // divide by 2
0402    // Remove multiples of pi/2.
0403    if (xx.compare(half_pi) > 0)
0404    {
0405       eval_divide(t, xx, half_pi);
0406       eval_trunc(n_pi, t);
0407       //
0408       // If n_pi is > 1/epsilon, then it is no longer an exact integer value
0409       // but an approximation.  As a result we can no longer reliably reduce
0410       // xx to 0 <= xx < pi/2, nor can we tell the sign of the result as we need
0411       // n_pi % 4 for that, but that will always be zero in this situation.
0412       // We could use a higher precision type for n_pi, along with division at
0413       // higher precision, but that's rather expensive.  So for now we do not support
0414       // this, and will see if anyone complains and has a legitimate use case.
0415       //
0416       if (n_pi.compare(get_constant_one_over_epsilon<T>()) > 0)
0417       {
0418          result = ui_type(1);
0419          return;
0420       }
0421       BOOST_MATH_INSTRUMENT_CODE(n_pi.str(0, std::ios_base::scientific));
0422       t = ui_type(4);
0423       eval_fmod(t, n_pi, t);
0424 
0425       bool b_go_down = false;
0426       if (t.compare(ui_type(0)) == 0)
0427       {
0428          b_go_down = true;
0429       }
0430       else if (t.compare(ui_type(1)) == 0)
0431       {
0432          b_negate_cos = true;
0433       }
0434       else if (t.compare(ui_type(2)) == 0)
0435       {
0436          b_go_down    = true;
0437          b_negate_cos = true;
0438       }
0439       else
0440       {
0441          BOOST_MP_ASSERT(t.compare(ui_type(3)) == 0);
0442       }
0443 
0444       if (b_go_down)
0445          eval_increment(n_pi);
0446 
0447       reduce_n_half_pi(xx, n_pi, b_go_down);
0448       //
0449       // Post reduction we may be a few ulp below zero or above pi/2
0450       // given that n_pi was calculated at working precision and not
0451       // at the higher precision used for reduction.  Correct that now:
0452       //
0453       if (eval_get_sign(xx) < 0)
0454       {
0455          xx.negate();
0456          b_negate_cos = !b_negate_cos;
0457       }
0458       if (xx.compare(half_pi) > 0)
0459       {
0460          eval_ldexp(half_pi, half_pi, 1);
0461          eval_subtract(xx, half_pi, xx);
0462          eval_ldexp(half_pi, half_pi, -1);
0463       }
0464       BOOST_MP_ASSERT(xx.compare(half_pi) <= 0);
0465       BOOST_MP_ASSERT(xx.compare(ui_type(0)) >= 0);
0466    }
0467    else
0468    {
0469       n_pi = ui_type(1);
0470       reduce_n_half_pi(xx, n_pi, true);
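      // (Editorial note: with n_pi = 1 and go_down = true the call above replaces
      //  xx by pi/2 - xx, so the cosine is obtained below as cos(x) = sin(pi/2 - x).)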
0471    }
0472 
0473    const bool b_zero = eval_get_sign(xx) == 0;
0474 
0475    if (b_zero)
0476    {
0477       result = si_type(0);
0478    }
0479    else
0480    {
0481       eval_sin(result, xx);
0482    }
0483    if (b_negate_cos)
0484       result.negate();
0485    BOOST_MATH_INSTRUMENT_CODE(result.str(0, std::ios_base::scientific));
0486 }
0487 
0488 template <class T>
0489 void eval_tan(T& result, const T& x)
0490 {
0491    static_assert(number_category<T>::value == number_kind_floating_point, "The tan function is only valid for floating point types.");
0492    if (&result == &x)
0493    {
0494       T temp;
0495       eval_tan(temp, x);
0496       result = temp;
0497       return;
0498    }
0499    T t;
0500    eval_sin(result, x);
0501    eval_cos(t, x);
0502    eval_divide(result, t);
0503 }
0504 
0505 template <class T>
0506 void hyp2F1(T& result, const T& a, const T& b, const T& c, const T& x)
0507 {
0508    // Compute the series representation of hyperg_2f1 taken from
0509    // Abramowitz and Stegun 15.1.1.
0510    // There are no checks on input range or parameter boundaries.
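   // (Editorial note: the series being summed is
   //  2F1(a, b; c; x) = sum_{n>=0} ((a)_n * (b)_n / (c)_n) * x^n / n!,
   //  with (q)_n the Pochhammer symbol; pochham_a/b/c track the Pochhammer
   //  factors and x_pow_n_div_n_fact tracks x^n/n!.)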
0511 
0512    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0513 
0514    T x_pow_n_div_n_fact(x);
0515    T pochham_a(a);
0516    T pochham_b(b);
0517    T pochham_c(c);
0518    T ap(a);
0519    T bp(b);
0520    T cp(c);
0521 
0522    eval_multiply(result, pochham_a, pochham_b);
0523    eval_divide(result, pochham_c);
0524    eval_multiply(result, x_pow_n_div_n_fact);
0525    eval_add(result, ui_type(1));
0526 
0527    T lim;
0528    eval_ldexp(lim, result, 1 - boost::multiprecision::detail::digits2<number<T, et_on> >::value());
0529 
0530    if (eval_get_sign(lim) < 0)
0531       lim.negate();
0532 
0533    ui_type n;
0534    T       term;
0535 
0536    const unsigned series_limit =
0537        boost::multiprecision::detail::digits2<number<T, et_on> >::value() < 100
0538            ? 100
0539            : boost::multiprecision::detail::digits2<number<T, et_on> >::value();
0540    // Series expansion of hyperg_2f1(a, b; c; x).
0541    for (n = 2; n < series_limit; ++n)
0542    {
0543       eval_multiply(x_pow_n_div_n_fact, x);
0544       eval_divide(x_pow_n_div_n_fact, n);
0545 
0546       eval_increment(ap);
0547       eval_multiply(pochham_a, ap);
0548       eval_increment(bp);
0549       eval_multiply(pochham_b, bp);
0550       eval_increment(cp);
0551       eval_multiply(pochham_c, cp);
0552 
0553       eval_multiply(term, pochham_a, pochham_b);
0554       eval_divide(term, pochham_c);
0555       eval_multiply(term, x_pow_n_div_n_fact);
0556       eval_add(result, term);
0557 
0558       if (eval_get_sign(term) < 0)
0559          term.negate();
0560       if (lim.compare(term) >= 0)
0561          break;
0562    }
0563    if (n >= series_limit)
0564       BOOST_MP_THROW_EXCEPTION(std::runtime_error("H2F1 failed to converge."));
0565 }
0566 
0567 template <class T>
0568 void eval_asin(T& result, const T& x)
0569 {
0570    static_assert(number_category<T>::value == number_kind_floating_point, "The asin function is only valid for floating point types.");
0571    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0572    using fp_type = typename std::tuple_element<0, typename T::float_types>::type                         ;
0573 
0574    if (&result == &x)
0575    {
0576       T t(x);
0577       eval_asin(result, t);
0578       return;
0579    }
0580 
0581    switch (eval_fpclassify(x))
0582    {
0583    case FP_NAN:
0584    case FP_INFINITE:
0585       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0586       {
0587          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0588          errno  = EDOM;
0589       }
0590       else
0591          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0592       return;
0593    case FP_ZERO:
0594       result = x;
0595       return;
0596    default:;
0597    }
0598 
0599    const bool b_neg = eval_get_sign(x) < 0;
0600 
0601    T xx(x);
0602    if (b_neg)
0603       xx.negate();
0604 
0605    int c = xx.compare(ui_type(1));
0606    if (c > 0)
0607    {
0608       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0609       {
0610          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0611          errno  = EDOM;
0612       }
0613       else
0614          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0615       return;
0616    }
0617    else if (c == 0)
0618    {
0619       result = get_constant_pi<T>();
0620       eval_ldexp(result, result, -1);
0621       if (b_neg)
0622          result.negate();
0623       return;
0624    }
0625 
0626    if (xx.compare(fp_type(1e-3)) < 0)
0627    {
0628       // http://functions.wolfram.com/ElementaryFunctions/ArcSin/26/01/01/
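      // (Editorial note: this branch uses asin(x) = x * 2F1(1/2, 1/2; 3/2; x^2).)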
0629       eval_multiply(xx, xx);
0630       T t1, t2;
0631       t1 = fp_type(0.5f);
0632       t2 = fp_type(1.5f);
0633       hyp2F1(result, t1, t1, t2, xx);
0634       eval_multiply(result, x);
0635       return;
0636    }
0637    else if (xx.compare(fp_type(1 - 5e-2f)) > 0)
0638    {
0639       // http://functions.wolfram.com/ElementaryFunctions/ArcSin/26/01/01/
0640       // This branch is similar in complexity to Newton iterations down to
0641       // the above limit.  It is *much* more accurate.
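      // (Editorial note: this branch uses asin(x) = pi/2 - 2*asin(sqrt((1 - x)/2)),
      //  with the inner asin evaluated as asin(z) = z * 2F1(1/2, 1/2; 3/2; z^2).)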
0642       T dx1;
0643       T t1, t2;
0644       eval_subtract(dx1, ui_type(1), xx);
0645       t1 = fp_type(0.5f);
0646       t2 = fp_type(1.5f);
0647       eval_ldexp(dx1, dx1, -1);
0648       hyp2F1(result, t1, t1, t2, dx1);
0649       eval_ldexp(dx1, dx1, 2);
0650       eval_sqrt(t1, dx1);
0651       eval_multiply(result, t1);
0652       eval_ldexp(t1, get_constant_pi<T>(), -1);
0653       result.negate();
0654       eval_add(result, t1);
0655       if (b_neg)
0656          result.negate();
0657       return;
0658    }
0659 #ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
0660    using guess_type = typename boost::multiprecision::detail::canonical<long double, T>::type;
0661 #else
0662    using guess_type = fp_type;
0663 #endif
0664    // Get initial estimate using standard math function asin.
0665    guess_type dd;
0666    eval_convert_to(&dd, xx);
0667 
0668    result = (guess_type)(std::asin(dd));
0669 
0670    // Newton-Raphson iteration: we should double our precision with each iteration,
0671    // but in practice this does not quite work in all cases... so terminate when we
0672    // have at least 2/3 of the digits correct, on the assumption that the correction
0673    // we've just added will finish the job...
0674 
0675    std::intmax_t current_precision = eval_ilogb(result);
0676    std::intmax_t target_precision  = std::numeric_limits<number<T> >::is_specialized ? 
0677       current_precision - 1 - (std::numeric_limits<number<T> >::digits * 2) / 3
0678       : current_precision - 1 - (boost::multiprecision::detail::digits2<number<T> >::value() * 2) / 3;
0679 
0680    // Newton-Raphson iteration
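   // (Editorial note: the loop solves f(theta) = sin(theta) - xx = 0, so each
   //  step applies theta <- theta - (sin(theta) - xx) / cos(theta).)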
0681    while (current_precision > target_precision)
0682    {
0683       T sine, cosine;
0684       eval_sin(sine, result);
0685       eval_cos(cosine, result);
0686       eval_subtract(sine, xx);
0687       eval_divide(sine, cosine);
0688       eval_subtract(result, sine);
0689       current_precision = eval_ilogb(sine);
0690       if (current_precision <= (std::numeric_limits<typename T::exponent_type>::min)() + 1)
0691          break;
0692    }
0693    if (b_neg)
0694       result.negate();
0695 }
0696 
0697 template <class T>
0698 inline void eval_acos(T& result, const T& x)
0699 {
0700    static_assert(number_category<T>::value == number_kind_floating_point, "The acos function is only valid for floating point types.");
0701    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0702 
0703    switch (eval_fpclassify(x))
0704    {
0705    case FP_NAN:
0706    case FP_INFINITE:
0707       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0708       {
0709          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0710          errno  = EDOM;
0711       }
0712       else
0713          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0714       return;
0715    case FP_ZERO:
0716       result = get_constant_pi<T>();
0717       eval_ldexp(result, result, -1); // divide by two.
0718       return;
0719    }
0720 
0721    T xx;
0722    eval_abs(xx, x);
0723    int c = xx.compare(ui_type(1));
0724 
0725    if (c > 0)
0726    {
0727       BOOST_IF_CONSTEXPR(std::numeric_limits<number<T, et_on> >::has_quiet_NaN)
0728       {
0729          result = std::numeric_limits<number<T, et_on> >::quiet_NaN().backend();
0730          errno  = EDOM;
0731       }
0732       else
0733          BOOST_MP_THROW_EXCEPTION(std::domain_error("Result is undefined or complex and there is no NaN for this number type."));
0734       return;
0735    }
0736    else if (c == 0)
0737    {
0738       if (eval_get_sign(x) < 0)
0739          result = get_constant_pi<T>();
0740       else
0741          result = ui_type(0);
0742       return;
0743    }
0744 
0745    using fp_type = typename std::tuple_element<0, typename T::float_types>::type;
0746 
0747    if (xx.compare(fp_type(1e-3)) < 0)
0748    {
0749       // https://functions.wolfram.com/ElementaryFunctions/ArcCos/26/01/01/
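      // (Editorial note: this branch uses acos(x) = pi/2 - x * 2F1(1/2, 1/2; 3/2; x^2).)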
0750       eval_multiply(xx, xx);
0751       T t1, t2;
0752       t1 = fp_type(0.5f);
0753       t2 = fp_type(1.5f);
0754       hyp2F1(result, t1, t1, t2, xx);
0755       eval_multiply(result, x);
0756       eval_ldexp(t1, get_constant_pi<T>(), -1);
0757       result.negate();
0758       eval_add(result, t1);
0759       return;
0760    }
0761    if (eval_get_sign(x) < 0)
0762    {
0763       eval_acos(result, xx);
0764       result.negate();
0765       eval_add(result, get_constant_pi<T>());
0766       return;
0767    }
0768    else if (xx.compare(fp_type(0.85)) > 0)
0769    {
0770       // https://functions.wolfram.com/ElementaryFunctions/ArcCos/26/01/01/
0771       // This branch is similar in complexity to Newton iterations down to
0772       // the above limit.  It is *much* more accurate.
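      // (Editorial note: this branch uses acos(x) = 2*asin(sqrt((1 - x)/2)),
      //  with the inner asin evaluated as asin(z) = z * 2F1(1/2, 1/2; 3/2; z^2).)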
0773       T dx1;
0774       T t1, t2;
0775       eval_subtract(dx1, ui_type(1), xx);
0776       t1 = fp_type(0.5f);
0777       t2 = fp_type(1.5f);
0778       eval_ldexp(dx1, dx1, -1);
0779       hyp2F1(result, t1, t1, t2, dx1);
0780       eval_ldexp(dx1, dx1, 2);
0781       eval_sqrt(t1, dx1);
0782       eval_multiply(result, t1);
0783       return;
0784    }
0785 
0786 #ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
0787    using guess_type = typename boost::multiprecision::detail::canonical<long double, T>::type;
0788 #else
0789    using guess_type = fp_type;
0790 #endif
0791    // Get initial estimate using standard math function asin.
0792    guess_type dd;
0793    eval_convert_to(&dd, xx);
0794 
0795    result = (guess_type)(std::acos(dd));
0796 
0797    // Newton-Raphson iteration: we should double our precision with each iteration,
0798    // but in practice this does not quite work in all cases... so terminate when we
0799    // have at least 2/3 of the digits correct, on the assumption that the correction
0800    // we've just added will finish the job...
0801 
0802    std::intmax_t current_precision = eval_ilogb(result);
0803    std::intmax_t target_precision = std::numeric_limits<number<T> >::is_specialized ?
0804       current_precision - 1 - (std::numeric_limits<number<T> >::digits * 2) / 3
0805       : current_precision - 1 - (boost::multiprecision::detail::digits2<number<T> >::value() * 2) / 3;
0806 
0807    // Newton-Raphson iteration
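   // (Editorial note: the loop solves f(theta) = cos(theta) - xx = 0, so each
   //  step applies theta <- theta - (xx - cos(theta)) / sin(theta), i.e.
   //  theta - (cos(theta) - xx) / (-sin(theta)).)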
0808    while (current_precision > target_precision)
0809    {
0810       T sine, cosine;
0811       eval_sin(sine, result);
0812       eval_cos(cosine, result);
0813       eval_subtract(cosine, xx);
0814       cosine.negate();
0815       eval_divide(cosine, sine);
0816       eval_subtract(result, cosine);
0817       current_precision = eval_ilogb(cosine);
0818       if (current_precision <= (std::numeric_limits<typename T::exponent_type>::min)() + 1)
0819          break;
0820    }
0821 }
0822 
0823 template <class T>
0824 void eval_atan(T& result, const T& x)
0825 {
0826    static_assert(number_category<T>::value == number_kind_floating_point, "The atan function is only valid for floating point types.");
0827    using si_type = typename boost::multiprecision::detail::canonical<std::int32_t, T>::type ;
0828    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0829    using fp_type = typename std::tuple_element<0, typename T::float_types>::type                         ;
0830 
0831    switch (eval_fpclassify(x))
0832    {
0833    case FP_NAN:
0834       result = x;
0835       errno  = EDOM;
0836       return;
0837    case FP_ZERO:
0838       result = x;
0839       return;
0840    case FP_INFINITE:
0841       if (eval_get_sign(x) < 0)
0842       {
0843          eval_ldexp(result, get_constant_pi<T>(), -1);
0844          result.negate();
0845       }
0846       else
0847          eval_ldexp(result, get_constant_pi<T>(), -1);
0848       return;
0849    default:;
0850    }
0851 
0852    const bool b_neg = eval_get_sign(x) < 0;
0853 
0854    T xx(x);
0855    if (b_neg)
0856       xx.negate();
0857 
0858    if (xx.compare(fp_type(0.1)) < 0)
0859    {
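      // (Editorial note: this branch uses atan(x) = x * 2F1(1/2, 1; 3/2; -x^2).)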
0860       T t1, t2, t3;
0861       t1 = ui_type(1);
0862       t2 = fp_type(0.5f);
0863       t3 = fp_type(1.5f);
0864       eval_multiply(xx, xx);
0865       xx.negate();
0866       hyp2F1(result, t1, t2, t3, xx);
0867       eval_multiply(result, x);
0868       return;
0869    }
0870 
0871    if (xx.compare(fp_type(10)) > 0)
0872    {
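      // (Editorial note: this branch uses atan(x) = pi/2 - (1/x) * 2F1(1/2, 1; 3/2; -1/x^2)
      //  for x > 0, with the sign fixed up afterwards for negative arguments.)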
0873       T t1, t2, t3;
0874       t1 = fp_type(0.5f);
0875       t2 = ui_type(1u);
0876       t3 = fp_type(1.5f);
0877       eval_multiply(xx, xx);
0878       eval_divide(xx, si_type(-1), xx);
0879       hyp2F1(result, t1, t2, t3, xx);
0880       eval_divide(result, x);
0881       if (!b_neg)
0882          result.negate();
0883       eval_ldexp(t1, get_constant_pi<T>(), -1);
0884       eval_add(result, t1);
0885       if (b_neg)
0886          result.negate();
0887       return;
0888    }
0889 
0890    // Get initial estimate using standard math function atan.
0891    fp_type d;
0892    eval_convert_to(&d, xx);
0893    result = fp_type(std::atan(d));
0894 
0895    // Newton-Raphson iteration: we should double our precision with each iteration,
0896    // but in practice this does not quite work in all cases... so terminate when we
0897    // have at least 2/3 of the digits correct, on the assumption that the correction
0898    // we've just added will finish the job...
0899 
0900    std::intmax_t current_precision = eval_ilogb(result);
0901    std::intmax_t target_precision  = std::numeric_limits<number<T> >::is_specialized ?
0902       current_precision - 1 - (std::numeric_limits<number<T> >::digits * 2) / 3
0903       : current_precision - 1 - (boost::multiprecision::detail::digits2<number<T> >::value() * 2) / 3;
0904 
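   // (Editorial note: the loop below solves f(theta) = tan(theta) - xx = 0; since
   //  f'(theta) = 1/cos(theta)^2, each step applies
   //  theta <- theta + cos(theta) * (xx * cos(theta) - sin(theta)).)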
0905    T s, c, t;
0906    while (current_precision > target_precision)
0907    {
0908       eval_sin(s, result);
0909       eval_cos(c, result);
0910       eval_multiply(t, xx, c);
0911       eval_subtract(t, s);
0912       eval_multiply(s, t, c);
0913       eval_add(result, s);
0914       current_precision = eval_ilogb(s);
0915       if (current_precision <= (std::numeric_limits<typename T::exponent_type>::min)() + 1)
0916          break;
0917    }
0918    if (b_neg)
0919       result.negate();
0920 }
0921 
0922 template <class T>
0923 void eval_atan2(T& result, const T& y, const T& x)
0924 {
0925    static_assert(number_category<T>::value == number_kind_floating_point, "The atan2 function is only valid for floating point types.");
0926    if (&result == &y)
0927    {
0928       T temp(y);
0929       eval_atan2(result, temp, x);
0930       return;
0931    }
0932    else if (&result == &x)
0933    {
0934       T temp(x);
0935       eval_atan2(result, y, temp);
0936       return;
0937    }
0938 
0939    using ui_type = typename boost::multiprecision::detail::canonical<std::uint32_t, T>::type;
0940 
0941    switch (eval_fpclassify(y))
0942    {
0943    case FP_NAN:
0944       result = y;
0945       errno  = EDOM;
0946       return;
0947    case FP_ZERO:
0948    {
0949       if (eval_signbit(x))
0950       {
0951          result = get_constant_pi<T>();
0952          if (eval_signbit(y))
0953             result.negate();
0954       }
0955       else
0956       {
0957          result = y; // Note we allow atan2(0,0) to be +-zero, even though it's mathematically undefined
0958       }
0959       return;
0960    }
0961    case FP_INFINITE:
0962    {
0963       if (eval_fpclassify(x) == FP_INFINITE)
0964       {
0965          if (eval_signbit(x))
0966          {
0967             // 3Pi/4
0968             eval_ldexp(result, get_constant_pi<T>(), -2);
0969             eval_subtract(result, get_constant_pi<T>());
0970             if (eval_get_sign(y) >= 0)
0971                result.negate();
0972          }
0973          else
0974          {
0975             // Pi/4
0976             eval_ldexp(result, get_constant_pi<T>(), -2);
0977             if (eval_get_sign(y) < 0)
0978                result.negate();
0979          }
0980       }
0981       else
0982       {
0983          eval_ldexp(result, get_constant_pi<T>(), -1);
0984          if (eval_get_sign(y) < 0)
0985             result.negate();
0986       }
0987       return;
0988    }
0989    }
0990 
0991    switch (eval_fpclassify(x))
0992    {
0993    case FP_NAN:
0994       result = x;
0995       errno  = EDOM;
0996       return;
0997    case FP_ZERO:
0998    {
0999       eval_ldexp(result, get_constant_pi<T>(), -1);
1000       if (eval_get_sign(y) < 0)
1001          result.negate();
1002       return;
1003    }
1004    case FP_INFINITE:
1005       if (eval_get_sign(x) > 0)
1006          result = ui_type(0);
1007       else
1008          result = get_constant_pi<T>();
1009       if (eval_get_sign(y) < 0)
1010          result.negate();
1011       return;
1012    }
1013 
1014    T xx;
1015    eval_divide(xx, y, x);
1016    if (eval_get_sign(xx) < 0)
1017       xx.negate();
1018 
1019    eval_atan(result, xx);
1020 
1021    // Determine quadrant (sign) based on signs of x, y
1022    const bool y_neg = eval_get_sign(y) < 0;
1023    const bool x_neg = eval_get_sign(x) < 0;
1024 
1025    if (y_neg != x_neg)
1026       result.negate();
1027 
1028    if (x_neg)
1029    {
1030       if (y_neg)
1031          eval_subtract(result, get_constant_pi<T>());
1032       else
1033          eval_add(result, get_constant_pi<T>());
1034    }
1035 }
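// (Editorial note on the quadrant handling above: eval_atan is applied to |y/x|,
//  giving a value in (0, pi/2); the result is negated when exactly one of x, y is
//  negative and shifted by +/- pi when x < 0.  For example, y = 1, x = -1 gives
//  atan(1) = pi/4, negated to -pi/4, then +pi, i.e. atan2(1, -1) = 3*pi/4.)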
1036 template <class T, class A>
1037 inline typename std::enable_if<boost::multiprecision::detail::is_arithmetic<A>::value, void>::type eval_atan2(T& result, const T& x, const A& a)
1038 {
1039    using canonical_type = typename boost::multiprecision::detail::canonical<A, T>::type         ;
1040    using cast_type = typename std::conditional<std::is_same<A, canonical_type>::value, T, canonical_type>::type;
1041    cast_type                                                                      c;
1042    c = a;
1043    eval_atan2(result, x, c);
1044 }
1045 
1046 template <class T, class A>
1047 inline typename std::enable_if<boost::multiprecision::detail::is_arithmetic<A>::value, void>::type eval_atan2(T& result, const A& x, const T& a)
1048 {
1049    using canonical_type = typename boost::multiprecision::detail::canonical<A, T>::type         ;
1050    using cast_type = typename std::conditional<std::is_same<A, canonical_type>::value, T, canonical_type>::type;
1051    cast_type                                                                      c;
1052    c = x;
1053    eval_atan2(result, c, a);
1054 }
1055 
1056 #ifdef BOOST_MSVC
1057 #pragma warning(pop)
1058 #endif