Skip to content

Commit b0bc0cd

Browse files
committed
Convert deprecated functions to use modern atomic variable handling
Replaced deprecated atomic functions with the recommended C11 equivalents.
1 parent ee39300 commit b0bc0cd

22 files changed

+141
-203
lines changed

src/BlocksRuntime/Block_private.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ enum {
8888
struct Block_byref {
8989
void *isa;
9090
struct Block_byref *forwarding;
91-
volatile int32_t flags; // contains ref count
91+
_Atomic(int32_t) flags; // contains ref count
9292
uint32_t size;
9393
};
9494

src/BlocksRuntime/runtime.c

+14-26
Original file line numberDiff line numberDiff line change
@@ -32,20 +32,7 @@
3232
#define __has_builtin(builtin) 0
3333
#endif
3434

35-
#if __has_builtin(__sync_bool_compare_and_swap)
36-
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) \
37-
__sync_bool_compare_and_swap(_Ptr, _Old, _New)
38-
#else
39-
#define _CRT_SECURE_NO_WARNINGS 1
40-
#include <Windows.h>
41-
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi,
42-
int volatile *dst) {
43-
// fixme barrier is overkill -- see objc-os.h
44-
int original = InterlockedCompareExchange((LONG volatile *)dst, newi, oldi);
45-
return (original == oldi);
46-
}
47-
#endif
48-
35+
#include <stdatomic.h>
4936
/***********************
5037
Globals
5138
************************/
@@ -64,21 +51,22 @@ Internal Utilities
6451
********************************************************************************/
6552

6653

67-
static int32_t latching_incr_int(volatile int32_t *where) {
54+
static int32_t latching_incr_int(_Atomic(int32_t) *where) {
6855
while (1) {
69-
int32_t old_value = *where;
56+
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
7057
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
7158
return BLOCK_REFCOUNT_MASK;
7259
}
73-
if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
60+
61+
if (atomic_compare_exchange_weak(where, &old_value, old_value + 2)) {
7462
return old_value+2;
7563
}
7664
}
7765
}
7866

79-
static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
67+
static bool latching_incr_int_not_deallocating(_Atomic(int32_t) *where) {
8068
while (1) {
81-
int32_t old_value = *where;
69+
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
8270
if (old_value & BLOCK_DEALLOCATING) {
8371
// if deallocating we can't do this
8472
return false;
@@ -87,7 +75,7 @@ static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
8775
// if latched, we're leaking this block, and we succeed
8876
return true;
8977
}
90-
if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
78+
if (atomic_compare_exchange_weak(where, &old_value, old_value + 2)) {
9179
// otherwise, we must store a new retained value without the deallocating bit set
9280
return true;
9381
}
@@ -96,9 +84,9 @@ static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
9684

9785

9886
// return should_deallocate?
99-
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
87+
static bool latching_decr_int_should_deallocate(_Atomic(int32_t) *where) {
10088
while (1) {
101-
int32_t old_value = *where;
89+
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
10290
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
10391
return false; // latched high
10492
}
@@ -111,24 +99,24 @@ static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
11199
new_value = old_value - 1;
112100
result = true;
113101
}
114-
if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
102+
if (atomic_compare_exchange_weak(where, &old_value, new_value)) {
115103
return result;
116104
}
117105
}
118106
}
119107

120108
// hit zero?
121-
static bool latching_decr_int_now_zero(volatile int32_t *where) {
109+
static bool latching_decr_int_now_zero(_Atomic(int32_t) *where) {
122110
while (1) {
123-
int32_t old_value = *where;
111+
int32_t old_value = atomic_load_explicit(where, memory_order_relaxed);
124112
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
125113
return false; // latched high
126114
}
127115
if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
128116
return false; // underflow, latch low
129117
}
130118
int32_t new_value = old_value - 2;
131-
if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
119+
if (atomic_compare_exchange_weak(where, &old_value, new_value)) {
132120
return (new_value & BLOCK_REFCOUNT_MASK) == 0;
133121
}
134122
}

src/allocator.c

+19-17
Original file line numberDiff line numberDiff line change
@@ -542,31 +542,33 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
542542
}
543543
// They are all unallocated, so we could madvise the page. Try to
544544
// take ownership of them all.
545-
int last_locked = 0;
546-
do {
547-
if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
545+
for (i = 0; i < BITMAPS_PER_PAGE; i++) {
546+
if (!os_atomic_cmpxchg(&page_bitmaps[i], BITMAP_C(0),
548547
BITMAP_ALL_ONES, relaxed)) {
549548
// We didn't get one; since there is a cont allocated in
550549
// the page, we can't madvise. Give up and unlock all.
551-
goto unlock;
550+
break;
552551
}
553-
} while (++last_locked < (signed)BITMAPS_PER_PAGE);
552+
}
553+
554+
if (i >= BITMAPS_PER_PAGE) {
554555
#if DISPATCH_DEBUG
555-
//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
556-
// "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
557-
// last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
558-
// Scribble to expose use-after-free bugs
559-
// madvise (syscall) flushes these stores
560-
memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
556+
// fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
557+
// "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
558+
// last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
559+
// Scribble to expose use-after-free bugs
560+
// madvise (syscall) flushes these stores
561+
memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
561562
#endif
562-
(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
563-
MADV_FREE));
563+
// madvise the page
564+
(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
565+
MADV_FREE));
566+
}
564567

565-
unlock:
566-
while (last_locked > 1) {
567-
page_bitmaps[--last_locked] = BITMAP_C(0);
568+
while (i > 1) {
569+
page_bitmaps[--i] = BITMAP_C(0);
568570
}
569-
if (last_locked) {
571+
if (i) {
570572
os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
571573
}
572574
return;

tests/Foundation/bench.mm

+33-35
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,8 @@ virtual void virtfunc(void) {
8383
return arg;
8484
}
8585

86-
static volatile int32_t global;
87-
static volatile int64_t w_global;
86+
static atomic_int global;
87+
static _Atomic(int64_t) w_global;
8888

8989
#if TARGET_OS_EMBEDDED
9090
static const size_t cnt = 5000000;
@@ -191,7 +191,7 @@ static void __attribute__((noinline))
191191
main(void)
192192
{
193193
pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
194-
OSSpinLock slock = OS_SPINLOCK_INIT;
194+
os_unfair_lock slock = OS_UNFAIR_LOCK_INIT;
195195
BasicObject *bo;
196196
BasicClass *bc;
197197
pthread_t pthr_pause;
@@ -219,8 +219,7 @@ static void __attribute__((noinline))
219219
cycles_per_nanosecond = (long double)freq / (long double)NSEC_PER_SEC;
220220

221221
#if BENCH_SLOW
222-
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
223-
assert(pool);
222+
@autoreleasepool {
224223
#endif
225224

226225
/* Malloc has different logic for threaded apps. */
@@ -371,9 +370,7 @@ static void __attribute__((noinline))
371370
}
372371
print_result2(s, "\"description\" ObjC call:");
373372

374-
[pool release];
375-
376-
pool = NULL;
373+
} // For the autorelease pool
377374
#endif
378375

379376
s = mach_absolute_time();
@@ -554,30 +551,30 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
554551

555552
s = mach_absolute_time();
556553
for (i = cnt; i; i--) {
557-
__sync_lock_test_and_set(&global, 0);
554+
atomic_exchange(&global, 0);
558555
}
559556
print_result(s, "Atomic xchg:");
560557

561558
s = mach_absolute_time();
562559
for (i = cnt; i; i--) {
563-
__sync_val_compare_and_swap(&global, 1, 0);
560+
int expected = 1; atomic_compare_exchange_strong(&global, &expected, 0);
564561
}
565562
print_result(s, "Atomic cmpxchg:");
566563

567564
s = mach_absolute_time();
568565
for (i = cnt; i; i--) {
569-
__sync_fetch_and_add(&global, 1);
566+
atomic_fetch_add(&global, 1);
570567
}
571568
print_result(s, "Atomic increment:");
572569

573570
{
574-
global = 0;
575-
volatile int32_t *g = &global;
571+
global = ATOMIC_VAR_INIT(0);
572+
atomic_int *g = &global;
576573

577574
s = mach_absolute_time();
578575
for (i = cnt; i; i--) {
579576
uint32_t result;
580-
__sync_and_and_fetch(g, 1);
577+
atomic_fetch_and(g, 1);
581578
result = *g;
582579
if (result) {
583580
abort();
@@ -587,57 +584,58 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
587584
}
588585

589586
{
590-
global = 0;
591-
volatile int32_t *g = &global;
587+
global = ATOMIC_VAR_INIT(0);
588+
atomic_int *g = &global;
592589

593590
s = mach_absolute_time();
594591
for (i = cnt; i; i--) {
595592
uint32_t result;
596-
result = __sync_and_and_fetch(g, 1);
593+
result = atomic_fetch_and(g, 1);
597594
if (result) {
598595
abort();
599596
}
600597
}
601598
print_result(s, "Atomic and-and-fetch, using result:");
602599
}
603600

604-
global = 0;
601+
global = ATOMIC_VAR_INIT(0);
605602

606603
s = mach_absolute_time();
607604
for (i = cnt; i; i--) {
608-
OSAtomicIncrement32Barrier(&global);
605+
__c11_atomic_fetch_add(&global, 1, memory_order_seq_cst);
609606
}
610-
print_result(s, "OSAtomicIncrement32Barrier:");
607+
print_result(s, "atomic_fetch_add with memory_order_seq_cst barrier:");
611608

612-
global = 0;
609+
global = ATOMIC_VAR_INIT(0);
613610

614611
s = mach_absolute_time();
615612
for (i = cnt; i; i--) {
616-
OSAtomicIncrement32(&global);
613+
__c11_atomic_fetch_add(&global, 1, memory_order_relaxed);
617614
}
618-
print_result(s, "OSAtomicIncrement32:");
615+
print_result(s, "atomic_fetch_add with memory_order_relaxed barrier:");
619616

620-
w_global = 0;
617+
w_global = ATOMIC_VAR_INIT(0);
621618

622619
s = mach_absolute_time();
623620
for (i = cnt; i; i--) {
624-
OSAtomicIncrement64Barrier(&w_global);
621+
__c11_atomic_fetch_add(&w_global, 1, memory_order_seq_cst);
625622
}
626-
print_result(s, "OSAtomicIncrement64Barrier:");
623+
print_result(s, "64-bit atomic_fetch_add with memory_order_seq_cst barrier:");
627624

628-
w_global = 0;
625+
w_global = ATOMIC_VAR_INIT(0);
629626

630627
s = mach_absolute_time();
631628
for (i = cnt; i; i--) {
632-
OSAtomicIncrement64(&w_global);
629+
__c11_atomic_fetch_add(&w_global, 1, memory_order_relaxed);
633630
}
634-
print_result(s, "OSAtomicIncrement64:");
631+
print_result(s, "64-bit atomic_fetch_add with memory_order_relaxed barrier:");
635632

636-
global = 0;
633+
global = ATOMIC_VAR_INIT(0);
637634

638635
s = mach_absolute_time();
639636
for (i = cnt; i; i--) {
640-
while (!__sync_bool_compare_and_swap(&global, 0, 1)) {
637+
int zero = 0;
638+
while (!atomic_compare_exchange_weak(&global, &zero, 1)) {
641639
do {
642640
#if defined(__i386__) || defined(__x86_64__)
643641
__asm__ __volatile__ ("pause");
@@ -646,16 +644,16 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
646644
#endif
647645
} while (global);
648646
}
649-
global = 0;
647+
global = ATOMIC_VAR_INIT(0);
650648
}
651649
print_result(s, "Inlined spin lock/unlock:");
652650

653651
s = mach_absolute_time();
654652
for (i = cnt; i; i--) {
655-
OSSpinLockLock(&slock);
656-
OSSpinLockUnlock(&slock);
653+
os_unfair_lock_lock(&slock);
654+
os_unfair_lock_unlock(&slock);
657655
}
658-
print_result(s, "OSSpinLock/Unlock:");
656+
print_result(s, "os_unfair_lock_lock/unlock:");
659657

660658
s = mach_absolute_time();
661659
for (i = cnt; i; i--) {

tests/Foundation/dispatch_apply_gc.m

+5-6
Original file line numberDiff line numberDiff line change
@@ -30,26 +30,25 @@
3030
#else
3131
const size_t final = 1000, desclen = 8892;
3232
#endif
33-
NSAutoreleasePool *pool = nil;
3433

3534
static void
3635
work(void* ctxt __attribute__((unused)))
3736
{
38-
pool = [[NSAutoreleasePool alloc] init];
37+
@autoreleasepool {
3938
NSMutableArray *a = [NSMutableArray array];
40-
OSSpinLock sl = OS_SPINLOCK_INIT, *l = &sl;
39+
os_unfair_lock sl = OS_UNFAIR_LOCK_INIT, *l = &sl;
4140

4241
dispatch_apply(final, dispatch_get_global_queue(0, 0), ^(size_t i){
4342
NSDecimalNumber *n = [NSDecimalNumber decimalNumberWithDecimal:
4443
[[NSNumber numberWithInteger:i] decimalValue]];
45-
OSSpinLockLock(l);
44+
os_unfair_lock_lock(l);
4645
[a addObject:n];
47-
OSSpinLockUnlock(l);
46+
os_unfair_lock_unlock(l);
4847
});
4948
test_long("count", [a count], final);
5049
test_long("description length", [[a description] length], desclen);
5150
a = nil;
52-
[pool drain];
51+
}
5352
test_stop_after_delay((void*)(intptr_t)1);
5453
}
5554

tests/Foundation/nsoperation.m

+2-2
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ - (void)main
5454
{
5555
dispatch_test_start("NSOperation");
5656

57-
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
57+
@autoreleasepool {
5858

5959
NSOperationQueue *queue = [[[NSOperationQueue alloc] init] autorelease];
6060
test_ptr_notnull("NSOperationQueue", queue);
@@ -67,7 +67,7 @@ - (void)main
6767

6868
[[NSRunLoop mainRunLoop] run];
6969

70-
[pool release];
70+
}
7171

7272
return 0;
7373
}

tests/dispatch_after.c

-3
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,6 @@
2525
#endif
2626
#include <stdlib.h>
2727
#include <assert.h>
28-
#ifdef __APPLE__
29-
#include <libkern/OSAtomic.h>
30-
#endif
3128

3229
#include <bsdtests.h>
3330
#include <Block.h>

0 commit comments

Comments
 (0)