diff --git a/include/hx/StdLibs.h b/include/hx/StdLibs.h
index d661c2698..aa2751a32 100644
--- a/include/hx/StdLibs.h
+++ b/include/hx/StdLibs.h
@@ -305,6 +305,154 @@ bool _hx_atomic_exchange_if(::cpp::Pointer<cpp::AtomicInt> inPtr, int test, int
 int _hx_atomic_inc(::cpp::Pointer<cpp::AtomicInt> inPtr );
 int _hx_atomic_dec(::cpp::Pointer<cpp::AtomicInt> inPtr );
+// Assumptions made:
+// People are not using 8 year old versions of GCC.
+
+#if defined(__GNUC__) || defined(__clang__)
+#define HX_GCC_ATOMICS
+#define HX_HAS_ATOMIC 1
+#elif defined(_MSC_VER)
+#define HX_MSVC_ATOMICS
+#define HX_HAS_ATOMIC 1
+#include <intrin.h>
+#else
+#define HX_HAS_ATOMIC 0
+#endif
+
+inline int _hx_atomic_add(volatile int *a, int b) {
+#if defined(HX_GCC_ATOMICS)
+  return __atomic_fetch_add(a, b, __ATOMIC_SEQ_CST);
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedExchangeAdd((long volatile *)a, b);
+#else
+  int old = *a;
+  *a += b;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_sub(volatile int *a, int b) {
+#if defined(HX_GCC_ATOMICS)
+  return __atomic_fetch_sub(a, b, __ATOMIC_SEQ_CST);
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedExchangeAdd((long volatile *)a, -b);
+#else
+  int old = *a;
+  *a -= b;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_and(volatile int *a, int b) {
+#if defined(HX_GCC_ATOMICS)
+  return __atomic_fetch_and(a, b, __ATOMIC_SEQ_CST);
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedAnd((long volatile *)a, b);
+#else
+  int old = *a;
+  *a &= b;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_or(volatile int *a, int b) {
+#if defined(HX_GCC_ATOMICS)
+  return __atomic_fetch_or(a, b, __ATOMIC_SEQ_CST);
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedOr((long volatile *)a, b);
+#else
+  int old = *a;
+  *a |= b;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_xor(int *a, int b) {
+#if defined(HX_GCC_ATOMICS)
+  return __atomic_fetch_xor(a, b, __ATOMIC_SEQ_CST);
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedXor((long volatile *)a, b);
+#else
+  int old = *a;
+  *a ^= b;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_compare_exchange(volatile int *a, int expected,
+                                       int replacement) {
+#if defined(HX_GCC_ATOMICS)
+  int _expected = expected;
+  __atomic_compare_exchange(a, &_expected, &replacement, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+  return _expected;
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedCompareExchange((long volatile *)a, replacement, expected);
+#else
+  int old = *a;
+  if(old == expected) {
+    *a = replacement;
+  }
+  return old;
+#endif
+}
+
+inline int _hx_atomic_exchange(volatile int *a, int replacement) {
+#if defined(HX_GCC_ATOMICS)
+  int ret = 0;
+  __atomic_exchange(a, &replacement, &ret, __ATOMIC_SEQ_CST);
+  return ret;
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedExchange((long volatile *)a, replacement);
+#else
+  int old = *a;
+  *a = replacement;
+  return old;
+#endif
+}
+
+inline int _hx_atomic_load(volatile int *a) {
+#if defined(HX_GCC_ATOMICS)
+  int ret = 0;
+  __atomic_load(a, &ret, __ATOMIC_SEQ_CST);
+  return ret;
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedXor((long volatile *)a, 0);
+#else
+  return *a;
+#endif
+}
+
+inline int _hx_atomic_store(volatile int *a, int value) {
+#if defined(HX_GCC_ATOMICS)
+  __atomic_store(a, &value, __ATOMIC_SEQ_CST);
+  return value;
+#elif defined(HX_MSVC_ATOMICS)
+  _InterlockedExchange((long volatile *)a, value);
+  return value;
+#else
+  *a = value;
+  return value;
+#endif
+}
+
+inline void* _hx_atomic_compare_exchange_ptr(volatile void **a, void *expected, void* replacement) {
+#if defined(HX_GCC_ATOMICS)
+  void* _expected = expected;
+  __atomic_compare_exchange(a, (volatile void **)&_expected, (volatile void**)&replacement, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+  return _expected;
+#elif defined(HX_MSVC_ATOMICS)
+  return _InterlockedCompareExchangePointer((void *volatile *)a, replacement, expected);
+#else
+  void *old = *a;
+  *a = replacement;
+  return old;
+#endif
+}
+
+inline void* _hx_atomic_compare_exchange_cast_ptr(void *a, void *expected, void *replacement) {
+  return _hx_atomic_compare_exchange_ptr((volatile void **)a, expected, replacement);
+}
+
 
 Array<String> __hxcpp_get_call_stack(bool inSkipLast);
 Array<String> __hxcpp_get_exception_stack();
 #define HXCPP_HAS_CLASSLIST
diff --git a/include/hx/Thread.h b/include/hx/Thread.h
index 9998de3ea..b2c3bbfb4 100644
--- a/include/hx/Thread.h
+++ b/include/hx/Thread.h
@@ -41,118 +41,6 @@
 #undef RegisterClass
 #endif
 
-#if defined(ANDROID)
-
-#define HX_HAS_ATOMIC 1
-
-#if (HXCPP_ANDROID_PLATFORM>=16)
-// Nice one, google, no one was using that.
-#define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))
-// returns 0=exchange took place, 1=not
-__ATOMIC_INLINE__ int __atomic_cmpxchg(int old, int _new, volatile int *ptr)
-   { return __sync_val_compare_and_swap(ptr, old, _new) != old; }
-__ATOMIC_INLINE__ int __atomic_dec(volatile int *ptr) { return __sync_fetch_and_sub (ptr, 1); }
-__ATOMIC_INLINE__ int __atomic_inc(volatile int *ptr) { return __sync_fetch_and_add (ptr, 1); }
-#else
-#include <sys/atomics.h>
-#endif
-
-// returns 1 if exchange took place
-inline bool HxAtomicExchangeIf(int inTest, int inNewVal,volatile int *ioWhere)
-   { return !__atomic_cmpxchg(inTest, inNewVal, ioWhere); }
-inline bool HxAtomicExchangeIfPtr(void *inTest, void *inNewVal,void * volatile *ioWhere)
-   { return __sync_val_compare_and_swap(ioWhere, inTest, inNewVal)==inTest; }
-
-// Returns old value naturally
-inline int HxAtomicInc(volatile int *ioWhere)
-   { return __atomic_inc(ioWhere); }
-inline int HxAtomicDec(volatile int *ioWhere)
-   { return __atomic_dec(ioWhere); }
-
-
-#elif defined(HX_WINDOWS)
-
-inline bool HxAtomicExchangeIf(int inTest, int inNewVal,volatile int *ioWhere)
-   { return InterlockedCompareExchange((volatile LONG *)ioWhere, inNewVal, inTest)==inTest; }
-
-inline bool HxAtomicExchangeIfPtr(void *inTest, void *inNewVal,void *volatile *ioWhere)
-   { return InterlockedCompareExchangePointer(ioWhere, inNewVal, inTest)==inTest; }
-
-// Make it return old value
-inline int HxAtomicInc(volatile int *ioWhere)
-   { return InterlockedIncrement((volatile LONG *)ioWhere)-1; }
-inline int HxAtomicDec(volatile int *ioWhere)
-   { return InterlockedDecrement((volatile LONG *)ioWhere)+1; }
-
-#define HX_HAS_ATOMIC 1
-
-#elif defined(HX_MACOS) || defined(IPHONE) || defined(APPLETV)
-#include <libkern/OSAtomic.h>
-
-#define HX_HAS_ATOMIC 1
-
-inline bool HxAtomicExchangeIf(int inTest, int inNewVal,volatile int *ioWhere)
-   { return OSAtomicCompareAndSwap32Barrier(inTest, inNewVal, ioWhere); }
-inline bool HxAtomicExchangeIfPtr(void *inTest, void *inNewVal,void * volatile *ioWhere)
-   { return OSAtomicCompareAndSwapPtrBarrier(inTest, inNewVal, ioWhere); }
-inline int HxAtomicInc(volatile int *ioWhere)
-   { return OSAtomicIncrement32Barrier(ioWhere)-1; }
-inline int HxAtomicDec(volatile int *ioWhere)
-   { return OSAtomicDecrement32Barrier(ioWhere)+1; }
-
-
-#elif defined(HX_LINUX)
-
-#define HX_HAS_ATOMIC 1
-
-inline bool HxAtomicExchangeIf(int inTest, int inNewVal,volatile int *ioWhere)
-   { return __sync_bool_compare_and_swap(ioWhere, inTest, inNewVal); }
-inline bool HxAtomicExchangeIfPtr(void *inTest, void *inNewVal,void *volatile *ioWhere)
-   { return __sync_bool_compare_and_swap(ioWhere, inTest, inNewVal); }
-// Returns old value naturally
-inline int HxAtomicInc(volatile int *ioWhere)
-   { return __sync_fetch_and_add(ioWhere,1); }
-inline int HxAtomicDec(volatile int *ioWhere)
-   { return __sync_fetch_and_sub(ioWhere,1); }
-
-#else
-
-#define HX_HAS_ATOMIC 0
-
-inline bool HxAtomicExchangeIfPtr(void *inTest, void *inNewVal,void *volatile *ioWhere)
-{
-   if (*ioWhere == inTest)
-   {
-      *ioWhere = inNewVal;
-      return true;
-   }
-   return false;
-}
-
-
-inline int HxAtomicExchangeIf(int inTest, int inNewVal,volatile int *ioWhere)
-{
-   if (*ioWhere == inTest)
-   {
-      *ioWhere = inNewVal;
-      return true;
-   }
-   return false;
-}
-inline int HxAtomicInc(volatile int *ioWhere)
-   { return (*ioWhere)++; }
-inline int HxAtomicDec(volatile int *ioWhere)
-   { return (*ioWhere)--; }
-
-
-#endif
-
-inline bool HxAtomicExchangeIfCastPtr(void *inTest, void *inNewVal,void *ioWhere)
-{
-   return HxAtomicExchangeIfPtr(inTest, inNewVal, (void *volatile *)ioWhere);
-}
-
-
 
 #if defined(HX_WINDOWS)
diff --git a/src/String.cpp b/src/String.cpp
index 52029eba5..bb7226ea7 100644
--- a/src/String.cpp
+++ b/src/String.cpp
@@ -1172,7 +1172,7 @@ const ::String &::String::makePermanent() const
 {
    unsigned int myHash = hash();
    {
-      while(! HxAtomicExchangeIf(0,1,&sPermanentStringSetMutex) )
+      while(_hx_atomic_compare_exchange(&sPermanentStringSetMutex, 0, 1) != 0)
         __hxcpp_gc_safe_point();
       TNonGcStringSet *element = sPermanentStringSet->find(myHash , *this);
       sPermanentStringSetMutex = 0;
@@ -1198,7 +1198,7 @@ const ::String &::String::makePermanent() const
          const_cast<String *>(this)->__s = s;
       }
 
-      while(! HxAtomicExchangeIf(0,1,&sPermanentStringSetMutex) )
+      while(_hx_atomic_compare_exchange(&sPermanentStringSetMutex, 0, 1) != 0)
         __hxcpp_gc_safe_point();
       sPermanentStringSet->set(*this,null());
       sPermanentStringSetMutex = 0;
diff --git a/src/hx/Thread.cpp b/src/hx/Thread.cpp
index 5aff7c867..0dedb89d9 100644
--- a/src/hx/Thread.cpp
+++ b/src/hx/Thread.cpp
@@ -930,17 +930,17 @@ int __hxcpp_GetCurrentThreadNumber()
 
 bool _hx_atomic_exchange_if(::cpp::Pointer<cpp::AtomicInt> inPtr, int test, int newVal )
 {
-   return HxAtomicExchangeIf(test, newVal, inPtr);
+   return _hx_atomic_compare_exchange(inPtr, test, newVal) == test;
 }
 
 int _hx_atomic_inc(::cpp::Pointer<cpp::AtomicInt> inPtr )
 {
-   return HxAtomicInc(inPtr);
+   return _hx_atomic_add(inPtr, 1);
 }
 
 int _hx_atomic_dec(::cpp::Pointer<cpp::AtomicInt> inPtr )
 {
-   return HxAtomicDec(inPtr);
+   return _hx_atomic_sub(inPtr, 1);
 }
 
diff --git a/src/hx/gc/Immix.cpp b/src/hx/gc/Immix.cpp
index bdee325ef..da502d404 100644
--- a/src/hx/gc/Immix.cpp
+++ b/src/hx/gc/Immix.cpp
@@ -859,7 +859,7 @@ struct BlockDataInfo
       if (mZeroed)
          return false;
 
-      if (HxAtomicExchangeIf(0,1,&mZeroLock))
+      if (_hx_atomic_compare_exchange(&mZeroLock, 0,1) == 0)
         return zeroAndUnlock();
 
       return false;
@@ -1469,7 +1469,7 @@ void GCOnNewPointer(void *inPtr)
 {
 #ifdef HXCPP_GC_DEBUG_ALWAYS_MOVE
    hx::sgPointerMoved.erase(inPtr);
-   HxAtomicInc(&sgAllocsSinceLastSpam);
+   _hx_atomic_add(&sgAllocsSinceLastSpam, 1);
 #endif
 }
 
@@ -1502,7 +1502,7 @@ struct GlobalChunks
      {
         MarkChunk *head = (MarkChunk *)processList;
         inChunk->next = head;
-        if (HxAtomicExchangeIfCastPtr(head, inChunk, &processList))
+        if (_hx_atomic_compare_exchange_cast_ptr(&processList, head, inChunk) == head)
           break;
      }
 
@@ -1515,12 +1515,12 @@ struct GlobalChunks
      {
        MarkChunk *head = (MarkChunk *)processList;
        inChunk->next = head;
-       if (HxAtomicExchangeIfCastPtr(head, inChunk, &processList))
+       if (_hx_atomic_compare_exchange_cast_ptr(&processList, head, inChunk) == head)
          break;
      }
 
 #ifdef PROFILE_THREAD_USAGE
-   HxAtomicInc(&sThreadChunkPushCount);
+   _hx_atomic_add(&sThreadChunkPushCount, 1);
 #endif
 
    if (MAX_GC_THREADS>1 && sLazyThreads)
@@ -1623,7 +1623,7 @@ struct GlobalChunks
      {
        MarkChunk *head = (MarkChunk *)freeList;
        inChunk->next = head;
-       if (HxAtomicExchangeIfCastPtr(head, inChunk, &freeList))
+       if (_hx_atomic_compare_exchange_cast_ptr(&freeList, head, inChunk) == head)
          return;
      }
   }
@@ -1634,11 +1634,11 @@ struct GlobalChunks
      if (inChunk)
         release(inChunk);
 
-     while( !HxAtomicExchangeIf(0,1,&processListPopLock) )
+     while(_hx_atomic_compare_exchange(&processListPopLock, 0, 1) != 0)
      {
         // Spin
 #ifdef PROFILE_THREAD_USAGE
-        HxAtomicInc(&sSpinCount);
+        _hx_atomic_add(&sSpinCount, 1);
 #endif
      }
 
@@ -1651,7 +1651,7 @@ struct GlobalChunks
           return 0;
        }
        MarkChunk *next = head->next;
-       if (HxAtomicExchangeIfCastPtr(head, next, &processList))
+       if (_hx_atomic_compare_exchange_cast_ptr(&processList, head, next) == head)
        {
           processListPopLock = 0;
 
@@ -1717,11 +1717,11 @@ struct GlobalChunks
   inline MarkChunk *alloc()
   {
-     while( !HxAtomicExchangeIf(0,1,&freeListPopLock) )
+     while(_hx_atomic_compare_exchange(&freeListPopLock, 0, 1) != 0)
      {
         // Spin
 #ifdef PROFILE_THREAD_USAGE
-        HxAtomicInc(&sSpinCount);
+        _hx_atomic_add(&sSpinCount, 1);
 #endif
      }
 
@@ -1734,7 +1734,7 @@ struct GlobalChunks
           return new MarkChunk;
        }
        MarkChunk *next = head->next;
-       if (HxAtomicExchangeIfCastPtr(head, next, &freeList))
+       if (_hx_atomic_compare_exchange_cast_ptr(&freeList, head, next) == head)
        {
           freeListPopLock = 0;
 
@@ -2047,7 +2047,7 @@ void MarkAllocUnchecked(void *inPtr,hx::MarkContext *__inCtx)
 
       unsigned int *pos = info->allocStart + startRow;
       unsigned int val = *pos;
-      while(!HxAtomicExchangeIf(val,val|gImmixStartFlag[start&127], (volatile int *)pos))
+      while(_hx_atomic_compare_exchange((volatile int *)pos, val,val|gImmixStartFlag[start&127]) != val)
         val = *pos;
 
 #ifdef HXCPP_GC_GENERATIONAL
@@ -2132,7 +2132,7 @@ void MarkObjectAllocUnchecked(hx::Object *inPtr,hx::MarkContext *__inCtx)
 
       unsigned int *pos = info->allocStart + startRow;
       unsigned int val = *pos;
-      while(!HxAtomicExchangeIf(val,val|gImmixStartFlag[start&127], (volatile int *)pos))
+      while(_hx_atomic_compare_exchange( (volatile int *)pos, val, val|gImmixStartFlag[start&127]) != val)
         val = *pos;
 #ifdef HXCPP_GC_GENERATIONAL
       info->mHasSurvivor = true;
@@ -3364,7 +3364,7 @@ class GlobalAllocator
          if (!info->mOwned && info->mMaxHoleSize>=inRequiredBytes)
          {
            // Acquire the zero-lock
-           if (HxAtomicExchangeIf(0,1,&info->mZeroLock))
+           if (_hx_atomic_compare_exchange(&info->mZeroLock, 0, 1) == 0)
            {
               // Acquire ownership...
              if (info->mOwned)
@@ -3380,7 +3380,7 @@ class GlobalAllocator
 
                 int idx = nextFreeBlock;
                 while(idx<mAllBlocks.size() && mAllBlocks[idx]->mOwned)
                 {
-                   HxAtomicExchangeIf(idx,idx+1,mNextFreeBlockOfSize+sizeSlot);
+                   _hx_atomic_compare_exchange(mNextFreeBlockOfSize+sizeSlot, idx, idx+1);
                    idx++;
                 }
 
@@ -3392,7 +3392,7 @@ class GlobalAllocator
            else
            {
               if (!info->mZeroed)
-                 HxAtomicInc(&sThreadZeroMisses);
+                 _hx_atomic_add(&sThreadZeroMisses, 1);
            }
 #endif
         }
@@ -4279,7 +4279,7 @@ class GlobalAllocator
    {
       while(!sgThreadPoolAbort)
       {
-         int blockId = HxAtomicInc( &mThreadJobId );
+         int blockId = _hx_atomic_add(&mThreadJobId, 1);
         if (blockId>=mAllBlocks.size())
            break;
 
@@ -4294,7 +4294,7 @@ class GlobalAllocator
    {
       while(!sgThreadPoolAbort)
       {
-         int blockId = HxAtomicInc( &mThreadJobId );
+         int blockId = _hx_atomic_add(&mThreadJobId, 1);
         if (blockId>=mAllBlocks.size())
            break;
 
@@ -4307,7 +4307,7 @@ class GlobalAllocator
    {
       while(!sgThreadPoolAbort)
      {
-         int blockId = HxAtomicInc( &mThreadJobId );
+         int blockId = _hx_atomic_add(&mThreadJobId, 1);
         if (blockId>=mAllBlocks.size())
            break;
 
@@ -4322,7 +4322,7 @@ class GlobalAllocator
    {
       while(!sgThreadPoolAbort)
       {
-         int blockId = HxAtomicInc( &mThreadJobId );
+         int blockId = _hx_atomic_add(&mThreadJobId, 1);
         if (blockId>=mAllBlocks.size())
            break;
 
@@ -4336,7 +4336,7 @@ class GlobalAllocator
    {
       while(!sgThreadPoolAbort)
       {
-         int zeroListId = HxAtomicInc( &mThreadJobId );
+         int zeroListId = _hx_atomic_add(&mThreadJobId, 1);
         if (zeroListId>=mZeroList.size())
            break;
 
@@ -4362,7 +4362,7 @@ class GlobalAllocator
 
             spinCount = 0;
             // Look at next block...
-            int zeroListId = HxAtomicInc( &mThreadJobId );
+            int zeroListId = _hx_atomic_add(&mThreadJobId, 1);
             if (zeroListId>=mZeroList.size())
             {
                // Done, so sleep...
@@ -4373,7 +4373,7 @@ class GlobalAllocator
             if (info->tryZero())
             {
                // We zeroed it, so increase queue count
-               HxAtomicInc(&mZeroListQueue);
+               _hx_atomic_add(&mZeroListQueue, 1);
 #ifdef PROFILE_THREAD_USAGE
                sThreadBlockZeroCount++;
 #endif
@@ -4387,7 +4387,7 @@ class GlobalAllocator
    void onZeroedBlockDequeued()
    {
       // Wake the thread?
-      if (HxAtomicDec(&mZeroListQueue) 0xffffffff then we are the collector
       // otherwise, someone else is collecting at the moment - so wait...
-      if (!HxAtomicExchangeIf(0, 0xffffffff,(volatile int *)&hx::gPauseForCollect))
+      if (_hx_atomic_compare_exchange((volatile int *)&hx::gPauseForCollect, 0, 0xffffffff) != 0)
       {
          if (inLocked)
          {
@@ -6568,7 +6568,7 @@ void *InternalNew(int inSize,bool inIsObject)
          //GCLOG("InternalNew spam\n");
          CollectFromThisThread(false,false);
       }
-      HxAtomicInc(&sgAllocsSinceLastSpam);
+      _hx_atomic_add(&sgAllocsSinceLastSpam, 1);
 #endif
 
    if (inSize>=IMMIX_LARGE_OBJ_SIZE)
@@ -6679,7 +6679,7 @@ void *InternalRealloc(int inFromSize, void *inData,int inSize, bool inExpand)
          //GCLOG("InternalNew spam\n");
          CollectFromThisThread(false,false);
       }
-      HxAtomicInc(&sgAllocsSinceLastSpam);
+      _hx_atomic_add(&sgAllocsSinceLastSpam, 1);
 #endif
 
    void *new_data = 0;