@@ -55,7 +55,8 @@ static inline ws_array_t *ws_queue_push(ws_queue_t *q, void *elt, int32_t eltsz)
     int64_t t = jl_atomic_load_acquire(&q->top);
     ws_array_t *ary = jl_atomic_load_relaxed(&q->array);
     ws_array_t *old_ary = NULL;
-    if (__unlikely(b - t > ary->capacity - 1)) {
+    int64_t size = b - t;
+    if (__unlikely(size > ary->capacity - 1)) {
         ws_array_t *new_ary = create_ws_array(2 * ary->capacity, eltsz);
         for (int i = 0; i < ary->capacity; i++) {
             memcpy(new_ary->buffer + ((t + i) & new_ary->mask) * eltsz, ary->buffer + ((t + i) & ary->mask) * eltsz, eltsz);
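The resize loop above relies on the masked indexing visible in the `memcpy`: assuming, as that indexing implies, that capacities are powers of two and `mask == capacity - 1`, then `idx & mask` equals `idx % capacity`, so copying each element with its own array's mask keeps logical indices stable across the doubling. A minimal standalone sketch of that property (illustrative names, not Julia's code):

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
    int64_t old_cap = 4, new_cap = 8;       /* capacities are powers of two */
    int64_t old_mask = old_cap - 1;         /* mask == capacity - 1         */
    int64_t new_mask = new_cap - 1;
    /* Logical index 5 maps to slot 1 in the old array and slot 5 in the
     * doubled one; re-copying with each array's own mask, as the loop in
     * ws_queue_push does, preserves logical positions. */
    assert((5 & old_mask) == 5 % old_cap);  /* slot 1 */
    assert((5 & new_mask) == 5 % new_cap);  /* slot 5 */
    return 0;
}
```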
@@ -77,9 +78,10 @@ static inline void ws_queue_pop(ws_queue_t *q, void *dest, int32_t eltsz) JL_NOT
     jl_atomic_store_relaxed(&q->bottom, b);
     jl_fence();
     int64_t t = jl_atomic_load_relaxed(&q->top);
-    if (__likely(t <= b)) {
+    int64_t size = b - t + 1;
+    if (__likely(size > 0)) {
         memcpy(dest, ary->buffer + (b & ary->mask) * eltsz, eltsz);
-        if (t == b) {
+        if (size == 1) {
             if (!jl_atomic_cmpswap(&q->top, &t, t + 1))
                 memset(dest, 0, eltsz);
             jl_atomic_store_relaxed(&q->bottom, b + 1);
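The `+ 1` here is easy to misread: by the time `top` is loaded, the decremented `bottom` has already been stored back, so `b - t` no longer counts the slot being popped. `size > 0` is then the non-empty check, and `size == 1` flags the case where the owner races thieves for the last element and the CAS on `top` must arbitrate. A toy illustration of the arithmetic (standalone, not Julia's code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t top = 2, bottom = 5;  /* three elements queued: indices 2, 3, 4 */
    int64_t b = bottom - 1;       /* owner claims index 4 by decrementing bottom first */
    int64_t size = b - top + 1;   /* 3: b - top alone would exclude the claimed slot */
    printf("size = %lld\n", (long long)size);                 /* size = 3 */
    printf("racing for last? %s\n", size == 1 ? "yes" : "no"); /* no */
    return 0;
}
```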
@@ -96,7 +98,8 @@ static inline void ws_queue_steal_from(ws_queue_t *q, void *dest, int32_t eltsz)
     int64_t t = jl_atomic_load_acquire(&q->top);
     jl_fence();
     int64_t b = jl_atomic_load_acquire(&q->bottom);
-    if (t < b) {
+    int64_t size = b - t;
+    if (size > 0) {
         ws_array_t *ary = jl_atomic_load_relaxed(&q->array);
         memcpy(dest, ary->buffer + (t & ary->mask) * eltsz, eltsz);
         if (!jl_atomic_cmpswap(&q->top, &t, t + 1))
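On the thief side, the fence orders the load of `top` before the load of `bottom`, `size > 0` means there is at least one stealable element, and the CAS on `top` arbitrates between competing thieves and a popping owner. A simplified sketch with C11 atomics standing in for the `jl_atomic_*` wrappers (`try_steal` is a hypothetical name; this tracks indices only and is not Julia's implementation):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic int64_t top = 0, bottom = 0;

/* Returns true if this thief successfully claimed index t. Note that the
 * real queue memcpys the element out *before* the CAS, so a failed CAS
 * simply discards the copy; this sketch omits the buffer entirely. */
static bool try_steal(int64_t *idx) {
    int64_t t = atomic_load_explicit(&top, memory_order_acquire);
    atomic_thread_fence(memory_order_seq_cst);   /* mirrors jl_fence() */
    int64_t b = atomic_load_explicit(&bottom, memory_order_acquire);
    if (b - t <= 0)                              /* the size > 0 check */
        return false;
    if (!atomic_compare_exchange_strong(&top, &t, t + 1))
        return false;                            /* lost to another thief or to pop */
    *idx = t;
    return true;
}

int main(void) {
    atomic_store(&bottom, 2);    /* pretend the owner pushed two elements */
    int64_t idx;
    return try_steal(&idx) && idx == 0 ? 0 : 1;  /* claims index 0 */
}
```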