Diffstat (limited to 'ports/berkeleydb/fix-conflict-macro.patch')
 ports/berkeleydb/fix-conflict-macro.patch | 123 ++++++++++++++++++++++++++++++
 1 file changed, 123 insertions(+)
diff --git a/ports/berkeleydb/fix-conflict-macro.patch b/ports/berkeleydb/fix-conflict-macro.patch
new file mode 100644
index 000000000..c15d7edff
--- /dev/null
+++ b/ports/berkeleydb/fix-conflict-macro.patch
@@ -0,0 +1,123 @@
+diff --git a/dbinc/atomic.h b/dbinc/atomic.h
+index 0034dcc..2dd5e03 100644
+--- a/dbinc/atomic.h
++++ b/dbinc/atomic.h
+@@ -70,7 +70,7 @@ typedef struct {
+ * These have no memory barriers; the caller must include them when necessary.
+ */
+ #define atomic_read(p) ((p)->value)
+-#define atomic_init(p, val) ((p)->value = (val))
++#define bdb_atomic_init(p, val) ((p)->value = (val))
+
+ #ifdef HAVE_ATOMIC_SUPPORT
+
+@@ -206,7 +206,7 @@ static inline int __atomic_compare_exchange(
+ #define atomic_dec(env, p) (--(p)->value)
+ #define atomic_compare_exchange(env, p, oldval, newval) \
+ (DB_ASSERT(env, atomic_read(p) == (oldval)), \
+- atomic_init(p, (newval)), 1)
++ bdb_atomic_init(p, (newval)), 1)
+ #else
+ #define atomic_inc(env, p) __atomic_inc(env, p)
+ #define atomic_dec(env, p) __atomic_dec(env, p)
+diff --git a/mp/mp_fget.c b/mp/mp_fget.c
+index 5fdee5a..452ef17 100644
+--- a/mp/mp_fget.c
++++ b/mp/mp_fget.c
+@@ -617,7 +617,7 @@ alloc: /* Allocate a new buffer header and data space. */
+
+ /* Initialize enough so we can call __memp_bhfree. */
+ alloc_bhp->flags = 0;
+- atomic_init(&alloc_bhp->ref, 1);
++ bdb_atomic_init(&alloc_bhp->ref, 1);
+ #ifdef DIAGNOSTIC
+ if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
+ __db_errx(env,
+@@ -911,7 +911,7 @@ alloc: /* Allocate a new buffer header and data space. */
+ MVCC_MPROTECT(bhp->buf, mfp->stat.st_pagesize,
+ PROT_READ);
+
+- atomic_init(&alloc_bhp->ref, 1);
++ bdb_atomic_init(&alloc_bhp->ref, 1);
+ MUTEX_LOCK(env, alloc_bhp->mtx_buf);
+ alloc_bhp->priority = bhp->priority;
+ alloc_bhp->pgno = bhp->pgno;
+diff --git a/mp/mp_mvcc.c b/mp/mp_mvcc.c
+index 34467d2..b604388 100644
+--- a/mp/mp_mvcc.c
++++ b/mp/mp_mvcc.c
+@@ -276,7 +276,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp)
+ #else
+ memcpy(frozen_bhp, bhp, SSZA(BH, buf));
+ #endif
+- atomic_init(&frozen_bhp->ref, 0);
++ bdb_atomic_init(&frozen_bhp->ref, 0);
+ if (mutex != MUTEX_INVALID)
+ frozen_bhp->mtx_buf = mutex;
+ else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH,
+@@ -428,7 +428,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp)
+ #endif
+ alloc_bhp->mtx_buf = mutex;
+ MUTEX_LOCK(env, alloc_bhp->mtx_buf);
+- atomic_init(&alloc_bhp->ref, 1);
++ bdb_atomic_init(&alloc_bhp->ref, 1);
+ F_CLR(alloc_bhp, BH_FROZEN);
+ }
+
+diff --git a/mp/mp_region.c b/mp/mp_region.c
+index e6cece9..e3e3382 100644
+--- a/mp/mp_region.c
++++ b/mp/mp_region.c
+@@ -224,7 +224,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
+ MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0)
+ return (ret);
+ SH_TAILQ_INIT(&htab[i].hash_bucket);
+- atomic_init(&htab[i].hash_page_dirty, 0);
++ bdb_atomic_init(&htab[i].hash_page_dirty, 0);
+ }
+
+ /*
+@@ -269,7 +269,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
+ hp->mtx_hash = (mtx_base == MUTEX_INVALID) ? MUTEX_INVALID :
+ mtx_base + i;
+ SH_TAILQ_INIT(&hp->hash_bucket);
+- atomic_init(&hp->hash_page_dirty, 0);
++ bdb_atomic_init(&hp->hash_page_dirty, 0);
+ #ifdef HAVE_STATISTICS
+ hp->hash_io_wait = 0;
+ hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0;
+diff --git a/mutex/mut_method.c b/mutex/mut_method.c
+index 2588763..56b6723 100644
+--- a/mutex/mut_method.c
++++ b/mutex/mut_method.c
+@@ -426,7 +426,7 @@ atomic_compare_exchange(env, v, oldval, newval)
+ MUTEX_LOCK(env, mtx);
+ ret = atomic_read(v) == oldval;
+ if (ret)
+- atomic_init(v, newval);
++ bdb_atomic_init(v, newval);
+ MUTEX_UNLOCK(env, mtx);
+
+ return (ret);
+diff --git a/mutex/mut_tas.c b/mutex/mut_tas.c
+index f3922e0..934a654 100644
+--- a/mutex/mut_tas.c
++++ b/mutex/mut_tas.c
+@@ -46,7 +46,7 @@ __db_tas_mutex_init(env, mutex, flags)
+
+ #ifdef HAVE_SHARED_LATCHES
+ if (F_ISSET(mutexp, DB_MUTEX_SHARED))
+- atomic_init(&mutexp->sharecount, 0);
++ bdb_atomic_init(&mutexp->sharecount, 0);
+ else
+ #endif
+ if (MUTEX_INIT(&mutexp->tas)) {
+@@ -486,7 +486,7 @@ __db_tas_mutex_unlock(env, mutex)
+ F_CLR(mutexp, DB_MUTEX_LOCKED);
+ /* Flush flag update before zeroing count */
+ MEMBAR_EXIT();
+- atomic_init(&mutexp->sharecount, 0);
++ bdb_atomic_init(&mutexp->sharecount, 0);
+ } else {
+ DB_ASSERT(env, sharecount > 0);
+ MEMBAR_EXIT();
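
Why the rename: Berkeley DB defines its own atomic_init() macro in
dbinc/atomic.h, and that name collides with the atomic_init() provided by
C11's <stdatomic.h> (the same identifier is also declared by libc++'s
<atomic>), which is presumably what this port patch works around. Any
translation unit that sees both definitions fails to build, so the patch
renames the macro to bdb_atomic_init at its definition and every call site.
Below is a minimal sketch of the collision and the fix; db_atomic_t is a
hypothetical simplification standing in for Berkeley DB's real type, not
the actual definition from dbinc/atomic.h.

/* Sketch only: shows why the port renames the macro.                    */
#include <stdatomic.h>

/* Hypothetical stand-in for Berkeley DB's private atomic counter type.  */
typedef struct { int value; } db_atomic_t;

/* The renamed macro from the patch; had it kept the name atomic_init,
 * including <stdatomic.h> above would have redefined a standard name.   */
#define bdb_atomic_init(p, val) ((p)->value = (val))

int main(void)
{
    db_atomic_t ref;
    bdb_atomic_init(&ref, 1);   /* Berkeley DB's plain assignment       */

    atomic_int n;
    atomic_init(&n, 0);         /* the standard C11 atomic_init         */
    return (ref.value == 1 && atomic_load(&n) == 0) ? 0 : 1;
}

Note that the patch changes no behavior: as the hunk in dbinc/atomic.h
shows, the macro was always a plain non-atomic assignment (the surrounding
comment says the caller supplies any needed memory barriers), so only the
identifier needed to change.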