This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch rhel-7.4
in repository 389-ds-base.
commit 70230bf894d9c0150dca8dc6fccc2712187f7b86
Author: William Brown <firstyear@redhat.com>
Date: Mon Mar 13 13:29:43 2017 +1000
Ticket 49164 - Change NS to acq-rel semantics for atomics
Bug Description: We were using seq_cst to guarantee our operations
as a poc. Changing to acq/rel allows us the same guarantees, but
with less overhead.
Fix Description: Change the barrier type.
https://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
https://pagure.io/389-ds-base/issue/49164
Author: wibrown
Review by: mreynolds (Thanks!)
(cherry picked from commit b1b0574d2cdb012ab206999ed51f08d3340386ce)
---
src/nunc-stans/ns/ns_thrpool.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c
index 744749b..a867b39 100644
--- a/src/nunc-stans/ns/ns_thrpool.c
+++ b/src/nunc-stans/ns/ns_thrpool.c
@@ -167,7 +167,7 @@ ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
{
/* We need to barrier this somehow? */
int32_t result = 0;
- __atomic_load(&(tp->shutdown), &result, __ATOMIC_SEQ_CST);
+ __atomic_load(&(tp->shutdown), &result, __ATOMIC_ACQUIRE);
return result;
}
@@ -176,7 +176,7 @@ ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp)
{
/* We need to barrier this somehow? */
int32_t result = 0;
- __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_SEQ_CST);
+ __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_ACQUIRE);
return result;
}
@@ -1402,7 +1402,7 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp)
#endif
if (tp) {
/* Set the flag to shutdown the event loop. */
- __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_RELEASE);
/* Finish the event queue wakeup job. This has the
* side effect of waking up the event loop thread, which
@@ -1491,7 +1491,7 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp)
}
/* Set the shutdown flag. This will cause the worker
* threads to exit after they finish all remaining work. */
- __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_SEQ_CST);
+ __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_RELEASE);
/* Wake up the idle worker threads so they can exit. */
pthread_mutex_lock(&(tp->work_q_lock));
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.