@@ -83,8 +83,8 @@ virtual void virtfunc(void) {
 	return arg;
 }
 
-static volatile int32_t global;
-static volatile int64_t w_global;
+static atomic_int global;
+static _Atomic(int64_t) w_global;
 
 #if TARGET_OS_EMBEDDED
 static const size_t cnt = 5000000;
@@ -191,7 +191,7 @@ static void __attribute__((noinline))
 main(void)
 {
 	pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
-	OSSpinLock slock = OS_SPINLOCK_INIT;
+	os_unfair_lock slock = OS_UNFAIR_LOCK_INIT;
 	BasicObject *bo;
 	BasicClass *bc;
 	pthread_t pthr_pause;
@@ -219,8 +219,7 @@ static void __attribute__((noinline))
 	cycles_per_nanosecond = (long double)freq / (long double)NSEC_PER_SEC;
 
 #if BENCH_SLOW
-	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
-	assert(pool);
+	@autoreleasepool {
 #endif
 
 	/* Malloc has different logic for threaded apps. */
@@ -371,9 +370,7 @@ static void __attribute__((noinline))
 	}
 	print_result2(s, "\"description\" ObjC call:");
 
-	[pool release];
-
-	pool = NULL;
+	} // For the autorelease pool
 #endif
 
 	s = mach_absolute_time();
@@ -554,30 +551,30 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_lock_test_and_set(&global, 0);
+		atomic_xchg(&global, 0);
 	}
 	print_result(s, "Atomic xchg:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_val_compare_and_swap(&global, 1, 0);
+		atomic_cmpxchg(&global, 1, 0);
 	}
 	print_result(s, "Atomic cmpxchg:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		__sync_fetch_and_add(&global, 1);
+		atomic_fetch_add(&global, 1);
 	}
 	print_result(s, "Atomic increment:");
 
 	{
-		global = 0;
-		volatile int32_t *g = &global;
+		global = ATOMIC_VAR_INIT(0);
+		atomic_int *g = &global;
 
 		s = mach_absolute_time();
 		for (i = cnt; i; i--) {
 			uint32_t result;
-			__sync_and_and_fetch(g, 1);
+			atomic_fetch_and(g, 1);
 			result = *g;
 			if (result) {
 				abort();
@@ -587,57 +584,58 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 	}
 
 	{
-		global = 0;
-		volatile int32_t *g = &global;
+		global = ATOMIC_VAR_INIT(0);
+		atomic_int *g = &global;
 
 		s = mach_absolute_time();
 		for (i = cnt; i; i--) {
 			uint32_t result;
-			result = __sync_and_and_fetch(g, 1);
+			result = atomic_fetch_and(g, 1);
 			if (result) {
 				abort();
 			}
 		}
 		print_result(s, "Atomic and-and-fetch, using result:");
 	}
 
-	global = 0;
+	global = ATOMIC_VAR_INIT(0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement32Barrier(&global);
+		__c11_atomic_fetch_add(&global, 1, memory_order_seq_cst);
 	}
-	print_result(s, "OSAtomicIncrement32Barrier:");
+	print_result(s, "atomic_fetch_add with memory_order_seq_cst barrier:");
 
-	global = 0;
+	global = ATOMIC_VAR_INIT(0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement32(&global);
+		__c11_atomic_fetch_add(&global, 1, memory_order_relaxed);
 	}
-	print_result(s, "OSAtomicIncrement32:");
+	print_result(s, "atomic_fetch_add with memory_order_relaxed barrier:");
 
-	w_global = 0;
+	w_global = ATOMIC_VAR_INIT(0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement64Barrier(&w_global);
+		__c11_atomic_fetch_add(&w_global, 1, memory_order_seq_cst);
 	}
-	print_result(s, "OSAtomicIncrement64Barrier:");
+	print_result(s, "64-bit atomic_fetch_add with memory_order_seq_cst barrier:");
 
-	w_global = 0;
+	w_global = ATOMIC_VAR_INIT(0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSAtomicIncrement64(&w_global);
+		__c11_atomic_fetch_add(&w_global, 1, memory_order_relaxed);
 	}
-	print_result(s, "OSAtomicIncrement64:");
+	print_result(s, "64-bit atomic_fetch_add with memory_order_relaxed barrier:");
 
-	global = 0;
+	global = ATOMIC_VAR_INIT(0);
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		while (!__sync_bool_compare_and_swap(&global, 0, 1)) {
+		int zero = 0;
+		while (!atomic_compare_exchange_weak(&global, &zero, 1)) {
 			do {
 #if defined(__i386__) || defined(__x86_64__)
 				__asm__ __volatile__ ("pause");
@@ -646,16 +644,16 @@ __asm__ __volatile__ ("svc 0x80" : "+r" (_r0)
 #endif
 			} while (global);
 		}
-		global = 0;
+		global = ATOMIC_VAR_INIT(0);
 	}
 	print_result(s, "Inlined spin lock/unlock:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		OSSpinLockLock(&slock);
-		OSSpinLockUnlock(&slock);
+		os_unfair_lock_lock(&slock);
+		os_unfair_lock_unlock(&slock);
 	}
-	print_result(s, "OSSpinLock/Unlock:");
+	print_result(s, "os_unfair_lock_lock/unlock:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
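
For reference, the changes above follow the usual migration from OSSpinLock and the __sync_*/OSAtomic* primitives to os_unfair_lock and C11 <stdatomic.h>. The sketch below shows that mapping in isolation; it is not part of the commit, and the names counter, wide_counter, and lock are placeholders.

/* Minimal standalone sketch of the OSSpinLock/OSAtomic -> os_unfair_lock/<stdatomic.h>
 * mapping used in this diff. Compile on macOS with: clang sketch.c */
#include <os/lock.h>      /* os_unfair_lock (replaces OSSpinLock) */
#include <stdatomic.h>    /* C11 atomics (replace __sync_* / OSAtomic*) */
#include <stdint.h>
#include <stdio.h>

static atomic_int counter;                          /* was: static volatile int32_t */
static _Atomic(int64_t) wide_counter;               /* was: static volatile int64_t */
static os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;   /* was: OSSpinLock = OS_SPINLOCK_INIT */

int main(void)
{
	/* OSAtomicIncrement32Barrier(&g) -> fetch-add with a seq_cst barrier */
	atomic_fetch_add_explicit(&counter, 1, memory_order_seq_cst);

	/* OSAtomicIncrement32(&g) -> relaxed fetch-add (no barrier) */
	atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);

	/* OSAtomicIncrement64Barrier(&wg) -> same call on a 64-bit atomic object */
	atomic_fetch_add_explicit(&wide_counter, 1, memory_order_seq_cst);

	/* __sync_lock_test_and_set(&g, 0) -> atomic_exchange */
	atomic_exchange(&counter, 0);

	/* __sync_bool_compare_and_swap(&g, 0, 1) -> compare-exchange;
	 * on failure, expected is updated with the current value. */
	int expected = 0;
	atomic_compare_exchange_strong(&counter, &expected, 1);

	/* OSSpinLockLock/OSSpinLockUnlock -> os_unfair_lock_lock/unlock */
	os_unfair_lock_lock(&lock);
	os_unfair_lock_unlock(&lock);

	printf("counter=%d wide=%lld\n", atomic_load(&counter),
	       (long long)atomic_load(&wide_counter));
	return 0;
}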