Commit 7916db7

wolfcrypt/src/wc_port.c and wolfssl/wolfcrypt/wc_port.h: change the precedence of atomic implementations, and don't use stdatomic.h in C++ builds (not compatible);
fix the name of the wolfSSL_Atomic_Ptr_CompareExchange() implementation in the _MSC_VER code path.

1 parent c430cc7
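For context on the "(not compatible)" note: C11's <stdatomic.h> is built around the _Atomic qualifier, which is not part of C++, so a C++ translation unit that pulls in the header generally fails to compile. A minimal sketch of the failing pattern (illustrative only, not code from this commit):

    #include <stdatomic.h>  /* C11 header; typically unusable from a C++ compiler */

    atomic_int counter;     /* atomic_int is a typedef for _Atomic int, a C-only construct */

This is why the diffs below move the gcc-style __atomic_*() built-ins ahead of the <stdatomic.h> implementation and guard the latter with !defined(__cplusplus).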

2 files changed: +59, -55 lines

wolfcrypt/src/wc_port.c
Lines changed: 49 additions & 46 deletions
@@ -1271,82 +1271,80 @@ char* wc_strdup_ex(const char *src, int memType) {
 }
 #endif

-#if defined(WOLFSSL_ATOMIC_OPS) && !defined(SINGLE_THREADED)
+#ifdef WOLFSSL_ATOMIC_OPS

 #if defined(WOLFSSL_USER_DEFINED_ATOMICS)

-#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H)
+#elif defined(SINGLE_THREADED)
+
+#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED)
+    /* direct calls using gcc-style compiler built-ins */

-    /* Default C Implementation */
 void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
 {
-    atomic_init(c, i);
+    *c = i;
 }

 void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
 {
-    atomic_init(c, i);
+    *c = i;
 }

 int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
 {
-    return atomic_fetch_add_explicit(c, i, memory_order_relaxed);
+    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
 }

 int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
 {
-    return atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
+    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
 }

 int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
 {
-    int ret = atomic_fetch_add_explicit(c, i, memory_order_relaxed);
-    return ret + i;
+    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
 }

 int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
 {
-    int ret = atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
-    return ret - i;
+    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
 }

-int wolfSSL_Atomic_Int_CompareExchange(
-    wolfSSL_Atomic_Int* c, int *expected_i, int new_i)
+int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
+    int new_i)
 {
     /* For the success path, use full synchronization with barriers --
      * "Sequentially-consistent ordering" -- so that all threads see the same
      * "single total modification order of all atomic operations" -- but on
      * failure we just need to be sure we acquire the value that changed out
      * from under us.
      */
-    return atomic_compare_exchange_strong_explicit(
-        c, expected_i, new_i, memory_order_seq_cst, memory_order_acquire);
+    return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
+        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
 }

 unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return atomic_fetch_add_explicit(c, i, memory_order_relaxed);
+    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
 }

 unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
+    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
 }

 unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    unsigned int ret = atomic_fetch_add_explicit(c, i, memory_order_relaxed);
-    return ret + i;
+    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
 }

 unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    unsigned int ret = atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
-    return ret - i;
+    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
 }

 int wolfSSL_Atomic_Uint_CompareExchange(
@@ -1358,90 +1356,91 @@ int wolfSSL_Atomic_Uint_CompareExchange(
      * failure we just need to be sure we acquire the value that changed out
      * from under us.
      */
-    return atomic_compare_exchange_strong_explicit(
-        c, expected_i, new_i, memory_order_seq_cst, memory_order_acquire);
+    return __atomic_compare_exchange_n(
+        c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
 }

 int wolfSSL_Atomic_Ptr_CompareExchange(
     void **c, void **expected_ptr, void *new_ptr)
 {
-    /* use gcc-built-in __atomic_compare_exchange_n(), not
-     * atomic_compare_exchange_strong_explicit(), to sidestep _Atomic type
-     * requirements.
-     */
     return __atomic_compare_exchange_n(
         c, expected_ptr, new_ptr, 0 /* weak */,
         __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
 }

-#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED)
-    /* direct calls using gcc-style compiler built-ins */
+#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \
+    !defined(__cplusplus)

+    /* Default C Implementation */
 void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
 {
-    *c = i;
+    atomic_init(c, i);
 }

 void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
 {
-    *c = i;
+    atomic_init(c, i);
 }

 int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
 {
-    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
+    return atomic_fetch_add_explicit(c, i, memory_order_relaxed);
 }

 int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
 {
-    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
+    return atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
 }

 int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
 {
-    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
+    int ret = atomic_fetch_add_explicit(c, i, memory_order_relaxed);
+    return ret + i;
 }

 int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
 {
-    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
+    int ret = atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
+    return ret - i;
 }

-int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
-    int new_i)
+int wolfSSL_Atomic_Int_CompareExchange(
+    wolfSSL_Atomic_Int* c, int *expected_i, int new_i)
 {
     /* For the success path, use full synchronization with barriers --
      * "Sequentially-consistent ordering" -- so that all threads see the same
      * "single total modification order of all atomic operations" -- but on
      * failure we just need to be sure we acquire the value that changed out
      * from under us.
      */
-    return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
-        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+    return atomic_compare_exchange_strong_explicit(
+        c, expected_i, new_i, memory_order_seq_cst, memory_order_acquire);
 }

 unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
+    return atomic_fetch_add_explicit(c, i, memory_order_relaxed);
 }

 unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
+    return atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
 }

 unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
+    unsigned int ret = atomic_fetch_add_explicit(c, i, memory_order_relaxed);
+    return ret + i;
 }

 unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
     unsigned int i)
 {
-    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
+    unsigned int ret = atomic_fetch_sub_explicit(c, i, memory_order_relaxed);
+    return ret - i;
 }

 int wolfSSL_Atomic_Uint_CompareExchange(
@@ -1453,13 +1452,17 @@ int wolfSSL_Atomic_Uint_CompareExchange(
      * failure we just need to be sure we acquire the value that changed out
      * from under us.
      */
-    return __atomic_compare_exchange_n(
-        c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+    return atomic_compare_exchange_strong_explicit(
+        c, expected_i, new_i, memory_order_seq_cst, memory_order_acquire);
 }

 int wolfSSL_Atomic_Ptr_CompareExchange(
     void **c, void **expected_ptr, void *new_ptr)
 {
+    /* use gcc-built-in __atomic_compare_exchange_n(), not
+     * atomic_compare_exchange_strong_explicit(), to sidestep _Atomic type
+     * requirements.
+     */
     return __atomic_compare_exchange_n(
         c, expected_ptr, new_ptr, 0 /* weak */,
         __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
@@ -1557,7 +1560,7 @@ int wolfSSL_Atomic_Uint_CompareExchange(
     }
 }

-int wolfSSL_Atomic_Uint_CompareExchange(
+int wolfSSL_Atomic_Ptr_CompareExchange(
     void ** c, void **expected_ptr, void *new_ptr)
 {
 #ifdef _WIN64
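Usage note: in both implementations shown above, wolfSSL_Atomic_Int_CompareExchange() returns nonzero on success and, on failure, writes the current value back into *expected_i. A minimal caller sketch built on that contract (increment_if_below() is a hypothetical example, not wolfSSL code):

    static wolfSSL_Atomic_Int g_count = WOLFSSL_ATOMIC_INITIALIZER(0);

    /* Atomically increment g_count while it is below cap; return 1 if the
     * increment happened, 0 if the cap had already been reached. */
    static int increment_if_below(int cap)
    {
        int cur = WOLFSSL_ATOMIC_LOAD(g_count);
        while (cur < cap) {
            if (wolfSSL_Atomic_Int_CompareExchange(&g_count, &cur, cur + 1))
                return 1;   /* CAS succeeded: we performed the increment */
            /* CAS failed: cur was refreshed with the current value; retry */
        }
        return 0;
    }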

wolfssl/wolfcrypt/wc_port.h
Lines changed: 10 additions & 9 deletions
@@ -494,15 +494,6 @@
     #define WOLFSSL_ATOMIC_LOAD(x) (x)
     #define WOLFSSL_ATOMIC_STORE(x, val) (x) = (val)
     #define WOLFSSL_ATOMIC_OPS
-#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H)
-    /* Default C Implementation */
-    #include <stdatomic.h>
-    typedef atomic_int wolfSSL_Atomic_Int;
-    typedef atomic_uint wolfSSL_Atomic_Uint;
-    #define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
-    #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x))
-    #define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val)
-    #define WOLFSSL_ATOMIC_OPS
 #elif defined(__GNUC__) && defined(__ATOMIC_CONSUME)
     /* direct calls using gcc-style compiler built-ins */
     typedef volatile int wolfSSL_Atomic_Int;
@@ -513,6 +504,16 @@
     #define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \
         val, __ATOMIC_RELEASE)
     #define WOLFSSL_ATOMIC_OPS
+#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \
+    !defined(__cplusplus)
+    /* Default C Implementation */
+    #include <stdatomic.h>
+    typedef atomic_int wolfSSL_Atomic_Int;
+    typedef atomic_uint wolfSSL_Atomic_Uint;
+    #define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
+    #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x))
+    #define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val)
+    #define WOLFSSL_ATOMIC_OPS
 #elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
     /* Use MSVC compiler intrinsics for atomic ops */
     #ifdef _WIN32_WCE
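Taken together, the two wc_port.h hunks above change the backend selection order so that the gcc-style built-ins are tried before the C11 header, and <stdatomic.h> is never used for C++ builds. A condensed sketch of just the branches visible in this diff (branches not shown here are elided):

    #if 0  /* earlier branches not shown in this diff */
    #elif defined(__GNUC__) && defined(__ATOMIC_CONSUME)
        /* gcc-style compiler built-ins: now tried first */
    #elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \
        !defined(__cplusplus)
        /* C11 <stdatomic.h>: now tried second, and only for C builds */
    #elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
        /* MSVC compiler intrinsics */
    #endif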
