Commit 50fdd46

[EASTL 3.17.02] (#395)
eastl::atomic<T>:
- fixed all the spelling mistakes in the docs
- added support for non-trivially default constructible types
- cleaned up comments and implementation
- improved 128-bit load code generation
- fixed the type pun to support non-trivially default constructible types
- ensured the MSVC intrinsics do not emit prefetch instructions

EASTL: to_array implementation

EASTL: fix for the rbtree input iterator ctor moving elements from the source container
1 parent 7bd4505 commit 50fdd46

56 files changed: 5048 additions & 4314 deletions

include/EASTL/array.h

Lines changed: 44 additions & 9 deletions
@@ -43,9 +43,9 @@ namespace eastl
 	/// Implements a templated array class as per the C++ standard TR1.
 	/// This class allows you to use a built-in C style array like an STL vector.
 	/// It does not let you change its size, as it is just like a C built-in array.
-	/// Our implementation here strives to remove function call nesting, as that
+	/// Our implementation here strives to remove function call nesting, as that
 	/// makes it hard for us to profile debug builds due to function call overhead.
-	/// Note that this is intentionally a struct with public data, as per the
+	/// Note that this is intentionally a struct with public data, as per the
 	/// C++ standard update proposal requirements.
 	///
 	/// Example usage:
@@ -75,19 +75,19 @@ namespace eastl
 			count = N
 		};
 
-		// Note that the member data is intentionally public.
-		// This allows for aggregate initialization of the
-		// object (e.g. array<int, 5> a = { 0, 3, 2, 4 }; )
+		// Note that the member data is intentionally public.
+		// This allows for aggregate initialization of the
+		// object (e.g. array<int, 5> a = { 0, 3, 2, 4 }; )
 		value_type mValue[N ? N : 1];
 
 	public:
 		// We intentionally provide no constructor, destructor, or assignment operator.
 
 		void fill(const value_type& value);
 
-		// Unlike the swap function for other containers, array::swap takes linear time,
+		// Unlike the swap function for other containers, array::swap takes linear time,
 		// may exit via an exception, and does not cause iterators to become associated with the other container.
-		void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value);
+		void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value);
 
 		EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT;
 		EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT;
@@ -318,7 +318,7 @@ namespace eastl
 
 
 	template <typename T, size_t N>
-	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
+	EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
 	array<T, N>::front() const
 	{
 		#if EASTL_ASSERT_ENABLED
@@ -382,7 +382,7 @@ namespace eastl
 		#endif
 
 		EA_ANALYSIS_ASSUME(i < N);
-		return static_cast<const_reference>(mValue[i]);
+		return static_cast<const_reference>(mValue[i]);
 	}
 
 
@@ -479,6 +479,41 @@ namespace eastl
 	}
 
 
+	///////////////////////////////////////////////////////////////////////
+	// to_array
+	///////////////////////////////////////////////////////////////////////
+	namespace internal
+	{
+		template<class T, size_t N, size_t... I>
+		EA_CONSTEXPR auto to_array(T (&a)[N], index_sequence<I...>)
+		{
+			return eastl::array<eastl::remove_cv_t<T>, N>{{a[I]...}};
+		}
+
+		template<class T, size_t N, size_t... I>
+		EA_CONSTEXPR auto to_array(T (&&a)[N], index_sequence<I...>)
+		{
+			return eastl::array<eastl::remove_cv_t<T>, N>{{eastl::move(a[I])...}};
+		}
+	}
+
+	template<class T, size_t N>
+	EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&a)[N])
+	{
+		static_assert(eastl::is_constructible_v<T, T&>, "element type T must be copy-initializable");
+		static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+		return internal::to_array(a, eastl::make_index_sequence<N>{});
+	}
+
+	template<class T, size_t N>
+	EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&&a)[N])
+	{
+		static_assert(eastl::is_move_constructible_v<T>, "element type T must be move-constructible");
+		static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
+		return internal::to_array(eastl::move(a), eastl::make_index_sequence<N>{});
+	}
+
+
 } // namespace eastl
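
For illustration, a minimal usage sketch of the new eastl::to_array overloads (hypothetical calling code, not part of this commit):

	#include <EASTL/array.h>

	// Hypothetical example: deduces the element type and length from a built-in array.
	void to_array_example()
	{
		int raw[] = {1, 2, 3};
		auto a1 = eastl::to_array(raw);        // lvalue overload: copies -> eastl::array<int, 3>
		auto a2 = eastl::to_array({4, 5, 6});  // rvalue overload: moves  -> eastl::array<int, 3>
		auto a3 = eastl::to_array("abc");      // -> eastl::array<char, 4>, includes the trailing '\0'
		(void)a1; (void)a2; (void)a3;
	}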

include/EASTL/atomic.h

Lines changed: 80 additions & 84 deletions
Large diffs are not rendered by default.

include/EASTL/internal/atomic/arch/arm/arch_arm.h

Lines changed: 2 additions & 2 deletions
@@ -53,12 +53,12 @@
  * NOTE:
  *
  * On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses
- * a dmb instead of control dependencie + isb to ensure the IRIW litmus test is satisfied
+ * a dmb instead of a control dependency + isb to ensure the IRIW litmus test is satisfied
  * as one reason. See EASTL/atomic.h for futher explanation and deep-dive.
  *
  * For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant.
  * All ARMv7 approaches work on ARMv8 and this code path is only used on msvc which isn't used
- * heavily. Most of the ARM code will end up going thru clang or gcc since microsft arm devices
+ * heavily. Most of the ARM code will end up going thru clang or gcc since microsoft arm devices
  * aren't that abundant.
  */

include/EASTL/internal/atomic/arch/arm/arch_arm_load.h

Lines changed: 8 additions & 16 deletions
@@ -29,8 +29,8 @@
  */
 #if defined(EA_PROCESSOR_ARM32)
 
-	#define EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(ret, ptr) \
-		ret = __ldrexd(ptr)
+	#define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \
+		ret = __ldrexd((ptr))
 
 #endif
 
@@ -60,7 +60,7 @@
 	#define EASTL_ARCH_ATOMIC_LOAD_64(type, ret, ptr) \
 		{ \
 			__int64 loadRet64; \
-			EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
+			EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
 			\
 			ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
 		}
@@ -75,6 +75,7 @@
 
 /**
  * NOTE:
+ *
  * The ARM documentation states the following:
 * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
 *
@@ -83,22 +84,13 @@
  */
 #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \
 	{ \
-		struct BitfieldPun128 \
-		{ \
-			__int64 value[2]; \
-		}; \
-		\
-		struct BitfieldPun128 loadedPun = EASTL_ATOMIC_TYPE_PUN_CAST(struct BitfieldPun128, *(ptr)); \
-		\
+		bool cmpxchgRetBool; \
+		ret = *(ptr); \
 		do \
 		{ \
-			bool cmpxchgRetBool; \
-			EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(struct BitfieldPun128, cmpxchgRetBool, \
-																	 EASTL_ATOMIC_TYPE_CAST(struct BitfieldPun128, (ptr)), \
-																	 &loadedPun, loadedPun); \
+			EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \
+																	 ptr, &(ret), ret); \
 		} while (!cmpxchgRetBool); \
-		\
-		ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadedPun); \
 	}
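
The rewritten 128-bit load above seeds ret with a plain (possibly torn) read and then loops on a 128-bit compare-exchange until it succeeds, rather than round-tripping through a default-constructed BitfieldPun128 struct; that is what allows non-trivially default constructible types and improves the generated code. A rough standalone sketch of the same load-via-cmpxchg idiom, using std::atomic for illustration only (not EASTL code):

	#include <atomic>
	#include <cstdint>

	struct alignas(16) Value128 { std::uint64_t lo, hi; }; // stand-in for the user's 128-bit type

	// When the compare-exchange succeeds, 'observed' equals the value held in memory during
	// the atomic operation; when it fails, 'observed' is refreshed and we simply retry.
	inline Value128 atomic_load128(std::atomic<Value128>& obj)
	{
		Value128 observed{}; // seed value; may not match what is currently stored
		while (!obj.compare_exchange_strong(observed, observed))
		{
			// retry with the refreshed 'observed'
		}
		return observed;
	}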

include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h

Lines changed: 1 addition & 0 deletions
@@ -34,6 +34,7 @@
 
 /**
  * NOTE:
+ *
  * While it makes no sense for a hardware memory barrier to not imply a compiler barrier.
 * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down
 * hard to find bugs due to the compiler deciding to reorder things.

include/EASTL/internal/atomic/arch/x86/arch_x86.h

Lines changed: 16 additions & 8 deletions
@@ -54,11 +54,14 @@
 
 /**
  * NOTE:
+ *
  * On 32-bit x86 CPUs Intel Pentium and newer, AMD K5 and newer
- * and any i686 class of x86 CPUs support only 64-bit cmpxchg
+ * and any i586 class of x86 CPUs support only 64-bit cmpxchg
  * known as cmpxchg8b.
- * On these class of cpus we can guarantee that 64-bit loads are
- * also atomic by using the SSE1/SSE2 movq instructions.
+ *
+ * On these class of cpus we can guarantee that 64-bit loads/stores are
+ * also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions.
+ *
  * We support all other atomic operations
 * on compilers that only provide this 64-bit cmpxchg instruction
 * by wrapping them around the 64-bit cmpxchg8b instruction.
@@ -91,21 +94,26 @@
 
 /**
  * NOTE:
+ *
  * 64-bit x64 CPUs support only 128-bit cmpxchg known as cmpxchg16b.
+ *
  * We support all other atomic operations by wrapping them around
 * the 128-bit cmpxchg16b instruction.
- * 128-bit loads are only atomic if using cmpxchg16b on x64.
+ *
+ * 128-bit loads are only atomic by using the cmpxchg16b instruction.
+ * SSE 128-bit loads are not guaranteed to be atomic even though some CPUs
+ * make them atomic such as AMD Ryzen or Intel SandyBridge.
 */
 #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
 
 
-	#define EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
-		static_assert(false, "EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED() must be implmented!");
+	#define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+		static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implmented!");
 
-	#define EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+	#define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
 
 
-	#define EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+	#define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
 	{ \
 		bool cmpxchgRet; \
 		/* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
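
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL is the template every 128-bit read-modify-write (add_fetch, and_fetch, and so on) expands through: take a snapshot (intentionally non-atomic, so it may shear), derive the desired value with PRE_COMPUTE_DESIRED, and retry the 128-bit compare-exchange until the store lands; POST_COMPUTE_RET then derives the value to return. A rough portable sketch of that shape, for illustration only (not EASTL code):

	#include <atomic>
	#include <cstdint>

	struct alignas(16) Value128 { std::uint64_t lo, hi; };

	// pre(observed, val) plays the role of PRE_COMPUTE_DESIRED;
	// post(prevObserved, val) plays the role of POST_COMPUTE_RET.
	template <typename Pre, typename Post>
	Value128 atomic_rmw128(std::atomic<Value128>& obj, Value128 val, Pre pre, Post post)
	{
		Value128 observed = obj.load(std::memory_order_relaxed); // snapshot seed
		Value128 desired  = pre(observed, val);
		while (!obj.compare_exchange_strong(observed, desired))
		{
			desired = pre(observed, val); // recompute from the refreshed snapshot
		}
		return post(observed, val); // e.g. add_fetch returns observed + val
	}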

include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h

Lines changed: 17 additions & 17 deletions
@@ -57,37 +57,37 @@
 #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
 
 
-	#define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+	#define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
 		ret = ((observed) + (val))
 
-	#define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+	#define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
 		ret = ((prevObserved) + (val))
 
 
 	#define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
 
 
 #endif

include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h

Lines changed: 17 additions & 17 deletions
@@ -57,37 +57,37 @@
 #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
 
 
-	#define EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+	#define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
 		ret = ((observed) & (val))
 
-	#define EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+	#define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
 		ret = ((prevObserved) & (val))
 
 
 	#define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
 
 	#define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
-		EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
-										  EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
+		EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+										  EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
 
 
 #endif

include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h

Lines changed: 13 additions & 13 deletions
@@ -18,49 +18,49 @@
 #if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
 
 
-	#define EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
+	#define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
 	{ \
 		/* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
 		__asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \
 							  "sete %3" /* If ZF == 1, set the return value to 1 */ \
 							  /* Output Operands */ \
-							  : "=a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "=d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)), \
+							  : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \
 								"+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \
 								"=rm"((ret)) \
 							  /* Input Operands */ \
-							  : "b"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))), "c"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)) + 1)), \
-								"a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)) \
+							  : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \
+								"a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \
 							  /* Clobbers */ \
 							  : "memory", "cc"); \
 	}
 
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 	#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
-		EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+		EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
 
 
 #endif
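
The inline assembly above is a single lock cmpxchg16b: it compares the 16 bytes at ptr with *expected (held in RDX:RAX), stores desired (RCX:RBX) and sets ZF on a match, and otherwise writes the observed memory contents back into *expected. The same contract can be sketched with the GCC/Clang __sync builtin (assumes x86-64 built with -mcx16; illustrative only, not EASTL code):

	// Hypothetical helper mirroring the macro's contract: returns true on success,
	// and on failure leaves the currently observed value in *expected.
	inline bool cmpxchg128(volatile unsigned __int128* ptr, unsigned __int128* expected, unsigned __int128 desired)
	{
		unsigned __int128 prev = __sync_val_compare_and_swap(ptr, *expected, desired);
		bool matched = (prev == *expected);
		*expected = prev;
		return matched;
	}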
