src/raw_mutex.rs (30 changes: 18 additions & 12 deletions)
@@ -99,14 +99,13 @@ unsafe impl lock_api::RawMutex for RawMutex {
     #[inline]
     unsafe fn unlock(&self) {
         deadlock::release_resource(self as *const _ as usize);
-        if self
-            .state
-            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
+        let mut prev = self.state.load(Ordering::Relaxed);
+        let new_state = prev & !LOCKED_BIT;
+        prev = self.state.swap(new_state, Ordering::Release);
Owner commented:
There's a bug here: you may "forget" a parked thread if another thread sets PARKED_BIT between the load and swap.
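A sketch of the interleaving this comment describes, with thread labels and the parking step simplified (illustrative, not the crate's exact code path):

    // state == LOCKED_BIT; T1 holds the mutex, T2 is about to park.
    //
    // T1 (unlock): prev = state.load(Relaxed)      // snapshot sees LOCKED_BIT
    // T2 (lock):   sets PARKED_BIT, then parks     // state = LOCKED_BIT | PARKED_BIT
    // T1 (unlock): state.swap(prev & !LOCKED_BIT)  // stores 0, wiping PARKED_BIT
    //
    // The stored state no longer records that T2 is parked.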

@BusyJay (Author) replied on May 7, 2025:
Then prev must be set to PARKED_BIT | LOCKED_BIT at L104 and can't pass the check at L105.
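In trace form, under the same illustrative interleaving (L104/L105 are the new file's line numbers for the swap and the check):

    // T1's swap returns prev = LOCKED_BIT | PARKED_BIT, so the
    // `prev == LOCKED_BIT` check fails and T1 falls through to
    // unlock_slow(false, parked_cleared = true), which can then
    // restore the wiped PARKED_BIT from the slow path.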

+        if prev == LOCKED_BIT {
             return;
         }
-        self.unlock_slow(false);
+        self.unlock_slow(false, (new_state & PARKED_BIT) == 0);
     }
 
     #[inline]
@@ -127,7 +126,7 @@ unsafe impl lock_api::RawMutexFair for RawMutex {
         {
             return;
         }
-        self.unlock_slow(true);
+        self.unlock_slow(true, false);
     }
 
     #[inline]
@@ -289,14 +288,14 @@ impl RawMutex {
     }
 
     #[cold]
-    fn unlock_slow(&self, force_fair: bool) {
+    fn unlock_slow(&self, force_fair: bool, parked_cleared: bool) {
         // Unpark one thread and leave the parked bit set if there might
         // still be parked threads on this address.
         let addr = self as *const _ as usize;
         let callback = |result: UnparkResult| {
             // If we are using a fair unlock then we should keep the
             // mutex locked and hand it off to the unparked thread.
-            if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+            if result.unparked_threads != 0 && force_fair {
                 // Clear the parked bit if there are no more parked
                 // threads.
                 if !result.have_more_threads {
@@ -308,9 +307,16 @@
             // Clear the locked bit, and the parked bit as well if there
             // are no more parked threads.
             if result.have_more_threads {
-                self.state.store(PARKED_BIT, Ordering::Release);
-            } else {
+                if force_fair {
+                    self.state.store(PARKED_BIT, Ordering::Release);
+                } else if parked_cleared {
+                    // Protected by mutex, must be seen by woken threads.
+                    self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
+                }
+            } else if force_fair {
                 self.state.store(0, Ordering::Release);
+            } else if !parked_cleared {
+                self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
             }
             TOKEN_NORMAL
         };
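A compact restatement of the callback's fix-up branches under the two new inputs (the match form is editorial, not the patch's code; the Relaxed orderings mirror the patch, whose comment notes the update is already protected, so woken threads will see it):

    // match (result.have_more_threads, force_fair, parked_cleared) {
    //     (true,  true,  _)     => state.store(PARKED_BIT, Release),      // fair path: keep waiters recorded
    //     (true,  false, true)  => state.fetch_or(PARKED_BIT, Relaxed),   // restore the bit the fast path wiped
    //     (true,  false, false) => {}                                     // bit survived the swap; nothing to do
    //     (false, true,  _)     => state.store(0, Release),               // no waiters left; fully unlock
    //     (false, false, false) => state.fetch_and(!PARKED_BIT, Relaxed), // clear the now-stale bit
    //     (false, false, true)  => {}                                     // fast path already cleared everything
    // }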
@@ -325,7 +331,7 @@
     #[cold]
     fn bump_slow(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
-        self.unlock_slow(true);
+        self.unlock_slow(true, false);
         self.lock();
     }
 }
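For reference, a self-contained sketch of the new fast path's shape, mirroring the diff (the standalone State type and the unlock_fast_path name are illustrative, not the crate's API):

    use std::sync::atomic::{AtomicU8, Ordering};

    const LOCKED_BIT: u8 = 0b01;
    const PARKED_BIT: u8 = 0b10;

    struct State(AtomicU8);

    impl State {
        /// Returns None when the unlock was uncontended, otherwise
        /// Some(parked_cleared) to pass into the slow path.
        fn unlock_fast_path(&self) -> Option<bool> {
            // Snapshot the state, then blindly clear LOCKED_BIT with a swap.
            let prev = self.0.load(Ordering::Relaxed);
            let new_state = prev & !LOCKED_BIT;
            let prev = self.0.swap(new_state, Ordering::Release);
            if prev == LOCKED_BIT {
                return None; // nothing was parked; done
            }
            // The swap stored a value computed from a stale snapshot, so it
            // may have wiped a PARKED_BIT set between the load and the swap;
            // report that so the slow path can repair it.
            Some((new_state & PARKED_BIT) == 0)
        }
    }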
src/raw_rwlock.rs (30 changes: 18 additions & 12 deletions)
@@ -93,14 +93,13 @@ unsafe impl lock_api::RawRwLock for RawRwLock {
     #[inline]
     unsafe fn unlock_exclusive(&self) {
         self.deadlock_release();
-        if self
-            .state
-            .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
+        let mut prev = self.state.load(Ordering::Relaxed);
+        let new_state = prev & !WRITER_BIT;
+        prev = self.state.swap(new_state, Ordering::Release);
+        if prev == WRITER_BIT {
             return;
         }
-        self.unlock_exclusive_slow(false);
+        self.unlock_exclusive_slow(false, (new_state & PARKED_BIT) == 0);
     }
 
     #[inline]
@@ -168,7 +167,7 @@ unsafe impl lock_api::RawRwLockFair for RawRwLock {
         {
             return;
         }
-        self.unlock_exclusive_slow(true);
+        self.unlock_exclusive_slow(true, false);
     }
 
     #[inline]
@@ -648,12 +647,12 @@ impl RawRwLock {
     }
 
     #[cold]
-    fn unlock_exclusive_slow(&self, force_fair: bool) {
+    fn unlock_exclusive_slow(&self, force_fair: bool, parked_cleared: bool) {
         // There are threads to unpark. Try to unpark as many as we can.
         let callback = |mut new_state, result: UnparkResult| {
             // If we are using a fair unlock then we should keep the
             // rwlock locked and hand it off to the unparked threads.
-            if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+            if result.unparked_threads != 0 && force_fair {
                 if result.have_more_threads {
                     new_state |= PARKED_BIT;
                 }
@@ -662,9 +661,16 @@
         } else {
             // Clear the parked bit if there are no more parked threads.
             if result.have_more_threads {
-                self.state.store(PARKED_BIT, Ordering::Release);
-            } else {
+                if force_fair {
+                    self.state.store(PARKED_BIT, Ordering::Release);
+                } else if parked_cleared {
+                    // Protected by mutex, must be seen by woken threads.
+                    self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
+                }
+            } else if force_fair {
                 self.state.store(0, Ordering::Release);
+            } else if !parked_cleared {
+                self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
             }
             TOKEN_NORMAL
         }
@@ -916,7 +922,7 @@ impl RawRwLock {
     #[cold]
     fn bump_exclusive_slow(&self) {
         self.deadlock_release();
-        self.unlock_exclusive_slow(true);
+        self.unlock_exclusive_slow(true, false);
         self.lock_exclusive();
     }
 