author     Even Rouault <even.rouault@spatialys.com>    2020-08-19 14:25:28 +0200
committer  GitHub <noreply@github.com>                  2020-08-19 14:25:28 +0200
commit     62ad09fe02c4a67e727822134768e4ebe6c8cde4 (patch)
tree       9eceacdf5853933ddce244a93d35bf1e6bea3adb /src/projections
parent     e1dc2d5516562c2e6e9668325d897c0eccdc6d2a (diff)
Revert compiler-generated Fused Multiply Addition optimized routines (#2327)
Fixes #2326. Partially reverts commit b84c9d0cb61f3bd561da6092e15e294ae7e410e0 to remove the use of the GCC 6 mechanism that generates multiple versions of functions with different optimization flags, which was found to cause crashes when dlopen'ing PROJ on CentOS 7.8 with gcc 8.3.1.
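For context, the reverted code relied on GCC's target_clones function attribute: the compiler emits an FMA-enabled clone and a default clone of the same function, and a runtime resolver (an ifunc) picks the clone the current CPU can execute, which is also why the failure surfaced at dlopen time. The following is a minimal, self-contained sketch of that mechanism under the same compiler guard as the removed code; it is a hypothetical standalone file for illustration, not part of PROJ.

/* Sketch of the GCC target_clones mechanism reverted by this commit.
 * The preprocessor guard mirrors the one removed from tmerc.cpp. */
#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 6 && defined(__x86_64__) && !defined(__FMA__)
__attribute__((target_clones("fma", "default")))
#endif
double fused_sum(double a, double b, double c)
{
    /* In the "fma" clone the compiler may contract this into a single
     * fused multiply-add instruction; the "default" clone keeps the
     * separate multiply and add. The compiler-generated resolver selects
     * between the two when the binary (or shared object) is loaded. */
    return a * b + c;
}

int main(void)
{
    printf("%.1f\n", fused_sum(2.0, 3.0, 1.0));
    return 0;
}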
Diffstat (limited to 'src/projections')
-rw-r--r--    src/projections/tmerc.cpp    28
1 file changed, 2 insertions(+), 26 deletions(-)
diff --git a/src/projections/tmerc.cpp b/src/projections/tmerc.cpp
index 3a58fc02..0c567e13 100644
--- a/src/projections/tmerc.cpp
+++ b/src/projections/tmerc.cpp
@@ -66,15 +66,6 @@ struct tmerc_data {
/* Constant for "exact" transverse mercator */
#define PROJ_ETMERC_ORDER 6
-// Determine if we should try to provide optimized versions for the Fused Multiply Addition
-// Intel instruction set. We use GCC 6 __attribute__((target_clones("fma","default")))
-// mechanism for that, where the compiler builds a default version, and one that
-// uses FMA. And at runtimes it figures out automatically which version can be used
-// by the current CPU. This allows to create general purpose binaries.
-#if defined(TARGET_CLONES_FMA_ALLOWED) && defined(__GNUC__) && __GNUC__ >= 6 && defined(__x86_64__) && !defined(__FMA__)
-#define BUILD_FMA_OPTIMIZED_VERSION
-#endif
-
/*****************************************************************************/
//
// Approximate Transverse Mercator functions
@@ -82,10 +73,7 @@ struct tmerc_data {
/*****************************************************************************/
-#ifdef BUILD_FMA_OPTIMIZED_VERSION
-__attribute__((target_clones("fma","default")))
-#endif
-inline static PJ_XY approx_e_fwd_internal (PJ_LP lp, PJ *P)
+static PJ_XY approx_e_fwd (PJ_LP lp, PJ *P)
{
PJ_XY xy = {0.0, 0.0};
const auto *Q = &(static_cast<struct tmerc_data*>(P->opaque)->approx);
@@ -127,11 +115,6 @@ inline static PJ_XY approx_e_fwd_internal (PJ_LP lp, PJ *P)
return (xy);
}
-static PJ_XY approx_e_fwd (PJ_LP lp, PJ *P)
-{
- return approx_e_fwd_internal(lp, P);
-}
-
static PJ_XY approx_s_fwd (PJ_LP lp, PJ *P) {
PJ_XY xy = {0.0,0.0};
double b, cosphi;
@@ -177,10 +160,7 @@ static PJ_XY approx_s_fwd (PJ_LP lp, PJ *P) {
return xy;
}
-#ifdef BUILD_FMA_OPTIMIZED_VERSION
-__attribute__((target_clones("fma","default")))
-#endif
-inline static PJ_LP approx_e_inv_internal (PJ_XY xy, PJ *P) {
+static PJ_LP approx_e_inv (PJ_XY xy, PJ *P) {
PJ_LP lp = {0.0,0.0};
const auto *Q = &(static_cast<struct tmerc_data*>(P->opaque)->approx);
@@ -212,10 +192,6 @@ inline static PJ_LP approx_e_inv_internal (PJ_XY xy, PJ *P) {
return lp;
}
-static PJ_LP approx_e_inv (PJ_XY xy, PJ *P) {
- return approx_e_inv_internal(xy, P);
-}
-
static PJ_LP approx_s_inv (PJ_XY xy, PJ *P) {
PJ_LP lp = {0.0, 0.0};
double h, g;