8362239: Reconcile enter_internal and reenter_internal in the ObjectMonitor code #26317

Closed
src/hotspot/share/runtime/objectMonitor.cpp: 330 changes (160 additions, 170 deletions)
@@ -926,96 +926,143 @@ const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
return ss->base();
}

void ObjectMonitor::enter_internal(JavaThread* current) {
assert(current->thread_state() == _thread_blocked, "invariant");
void ObjectMonitor::enter_internal_self_park(JavaThread* current, bool do_timed_parked) {
static int MAX_RECHECK_INTERVAL = 1000;
int recheck_interval = 1;
// park self
if (do_timed_parked) {
current->_ParkEvent->park((jlong)recheck_interval);
// Increase the recheck_interval, but clamp the value.
recheck_interval *= 8;
if (recheck_interval > MAX_RECHECK_INTERVAL) {
recheck_interval = MAX_RECHECK_INTERVAL;
}
} else {
current->_ParkEvent->park();
}
}
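
A minimal standalone sketch of the timed-park shape above: an x8 exponential backoff clamped at 1000 ms, with std::condition_variable standing in for HotSpot's ParkEvent. All names here are hypothetical, and the interval persists across calls in this sketch (in the helper above it is function-local) so the 1, 8, 64, 512, 1000 ms progression is visible.

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>

class BackoffParker {
  std::mutex              _mu;
  std::condition_variable _cv;
  bool                    _signaled = false;
  int                     _interval_ms = 1;        // 1, 8, 64, 512, 1000, 1000, ...
  static constexpr int    _max_interval_ms = 1000; // clamp, as above

 public:
  // Timed park: wait up to the current interval, then widen it (x8, clamped)
  // so a stranded waiter rechecks quickly at first and cheaply later.
  void timed_park() {
    std::unique_lock<std::mutex> lk(_mu);
    _cv.wait_for(lk, std::chrono::milliseconds(_interval_ms),
                 [&] { return _signaled; });
    _signaled = false;
    _interval_ms = std::min(_interval_ms * 8, _max_interval_ms);
  }

  void unpark() {
    { std::lock_guard<std::mutex> lk(_mu); _signaled = true; }
    _cv.notify_one();
  }
};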

// Try the lock - TATAS
if (try_lock(current) == TryLockResult::Success) {
assert(!has_successor(current), "invariant");
assert(has_owner(current), "invariant");
return;
void ObjectMonitor::reenter_internal_self_park(JavaThread* current) {
OSThreadContendState osts(current->osthread());
assert(current->thread_state() == _thread_in_vm, "invariant");
{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
current->_ParkEvent->park();
}
}
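
The merge strategy in this change is that the old reenter_internal() becomes the currentNode != nullptr branch of enter_internal(). A hedged skeleton of that dispatch, with stub types and elided bodies; only the shape is taken from the diff:

struct JavaThreadStub {};
struct ObjectWaiterStub { JavaThreadStub* _thread; };

// One contended-entry routine; the optional waiter node selects the path.
void enter_internal_sketch(JavaThreadStub* current,
                           ObjectWaiterStub* currentNode = nullptr) {
  if (currentNode == nullptr) {
    // Enter path: try-lock, spin once, enqueue a stack-allocated waiter,
    // then loop: try_lock / park / spin until ownership is acquired.
  } else {
    // Reenter path (wait() reentry): currentNode is already on the entry
    // list in state TS_ENTER; loop: try_lock / spin / park to reacquire.
  }
  (void)current; // bodies elided in this sketch
}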

assert(InitDone, "Unexpectedly not initialized");

// We try one round of spinning *before* enqueueing current.
//
// If the _owner is ready but OFFPROC we could use a YieldTo()
// operation to donate the remainder of this thread's quantum
// to the owner. This has subtle but beneficial affinity
// effects.
void ObjectMonitor::enter_internal(JavaThread* current, ObjectWaiter* currentNode) {
// We use reenter path only for monitor reentry in wait(). It is a contended slow path.
// Reenter path requires currentNode != nullptr, whereas enter path does not need it at all.
const bool enterPath = currentNode == nullptr;
ObjectWaiter node(current);
bool do_timed_parked = false;

if (try_spin(current)) {
assert(has_owner(current), "invariant");
if (enterPath) {
assert(current->thread_state() == _thread_blocked, "invariant");
if (try_lock(current) == TryLockResult::Success) {
assert(!has_successor(current), "invariant");
assert(has_owner(current), "invariant");
return;
}
assert(InitDone, "Unexpectedly not initialized");

// We try one round of spinning *before* enqueueing current.
//
// If the _owner is ready but OFFPROC we could use a YieldTo()
// operation to donate the remainder of this thread's quantum
// to the owner. This has subtle but beneficial affinity
// effects.

if (try_spin(current)) {
assert(has_owner(current), "invariant");
assert(!has_successor(current), "invariant");
return;
}

// The Spin failed -- Enqueue and park the thread ...
assert(!has_successor(current), "invariant");
return;
}
assert(!has_owner(current), "invariant");

// The Spin failed -- Enqueue and park the thread ...
assert(!has_successor(current), "invariant");
assert(!has_owner(current), "invariant");
// Enqueue "current" on ObjectMonitor's _entry_list.
//
// Node acts as a proxy for current.
// As an aside, if we were ever to rewrite the synchronization code mostly
// in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
// Java objects. This would avoid awkward lifecycle and liveness issues,
// as well as eliminate a subset of ABA issues.
// TODO: eliminate ObjectWaiter and enqueue either Threads or Events.

// Enqueue "current" on ObjectMonitor's _entry_list.
//
// Node acts as a proxy for current.
// As an aside, if we were ever to rewrite the synchronization code mostly
// in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
// Java objects. This would avoid awkward lifecycle and liveness issues,
// as well as eliminate a subset of ABA issues.
// TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
current->_ParkEvent->reset();

ObjectWaiter node(current);
current->_ParkEvent->reset();
if (try_lock_or_add_to_entry_list(current, &node)) {
return; // We got the lock.
}

if (try_lock_or_add_to_entry_list(current, &node)) {
return; // We got the lock.
}
// This thread is now added to the _entry_list.

// The lock might have been released while this thread was occupied queueing
// itself onto _entry_list. To close the race and avoid "stranding" and
// progress-liveness failure we must resample-retry _owner before parking.
// Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
// In this case the ST-MEMBAR is accomplished with CAS().
//
// TODO: Defer all thread state transitions until park-time.
// Since state transitions are heavy and inefficient we'd like
// to defer the state transitions until absolutely necessary,
// and in doing so avoid some transitions ...

// For virtual threads that are pinned, do a timed-park instead to
// alleviate some deadlock cases where the successor is an unmounted
// virtual thread that cannot run. This can happen in particular when
// this virtual thread is currently loading/initializing a class, and
// all other carriers have a vthread pinned to it waiting for said class
// to be loaded/initialized.
static int MAX_RECHECK_INTERVAL = 1000;
int recheck_interval = 1;
bool do_timed_parked = false;
ContinuationEntry* ce = current->last_continuation();
if (ce != nullptr && ce->is_virtual_thread()) {
do_timed_parked = true;
// The lock might have been released while this thread was occupied queueing
// itself onto _entry_list. To close the race and avoid "stranding" and
// progress-liveness failure we must resample-retry _owner before parking.
// Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
// In this case the ST-MEMBAR is accomplished with CAS().
//
// TODO: Defer all thread state transitions until park-time.
// Since state transitions are heavy and inefficient we'd like
// to defer the state transitions until absolutely necessary,
// and in doing so avoid some transitions ...

// For virtual threads that are pinned, do a timed-park instead to
// alleviate some deadlock cases where the successor is an unmounted
// virtual thread that cannot run. This can happen in particular when
// this virtual thread is currently loading/initializing a class, and
// all other carriers have a vthread pinned to it waiting for said class
// to be loaded/initialized.

ContinuationEntry* ce = current->last_continuation();
if (ce != nullptr && ce->is_virtual_thread()) {
do_timed_parked = true;
}
} else {
// Reenter path
assert(current != nullptr, "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
assert(currentNode != nullptr, "invariant");
assert(currentNode->_thread == current, "invariant");
assert(_waiters > 0, "invariant");
assert_mark_word_consistency();
}

for (;;) {

if (!enterPath) {
ObjectWaiter::TStates v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
}

if (try_lock(current) == TryLockResult::Success) {
break;
}
assert(!has_owner(current), "invariant");

// park self
if (do_timed_parked) {
current->_ParkEvent->park((jlong) recheck_interval);
// Increase the recheck_interval, but clamp the value.
recheck_interval *= 8;
if (recheck_interval > MAX_RECHECK_INTERVAL) {
recheck_interval = MAX_RECHECK_INTERVAL;
}
// If that fails, spin again. Note that spin count may be zero so the above TryLock
// is necessary.
if (!enterPath && try_spin(current)) {
break;
}

if (enterPath) {
enter_internal_self_park(current, do_timed_parked);
} else {
current->_ParkEvent->park();
reenter_internal_self_park(current);
}

// Try again, but just so we distinguish between futile wakeups and
// successful wakeups. The following test isn't algorithmically
// necessary, but it helps us maintain sensible statistics.
if (try_lock(current) == TryLockResult::Success) {
break;
}
@@ -1026,124 +1073,67 @@ void ObjectMonitor::enter_internal(JavaThread* current) {
// We can defer clearing _succ until after the spin completes
// try_spin() must tolerate being called with _succ == current.
// Try yet another round of adaptive spinning.
if (try_spin(current)) {
if (enterPath && try_spin(current)) {
break;
}

// We can find that we were unpark()ed and redesignated _succ while
// we were spinning. That's harmless. If we iterate and call park(),
// park() will consume the event and return immediately and we'll
// just spin again. This pattern can repeat, leaving _succ to simply
// spin on a CPU.

if (has_successor(current)) clear_successor();

// Invariant: after clearing _succ a thread *must* retry _owner before parking.
OrderAccess::fence();
}

// Egress :
// Current has acquired the lock -- Unlink current from the _entry_list.
unlink_after_acquire(current, &node);
if (has_successor(current)) {
clear_successor();
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
}

// We've acquired ownership with CAS().
// CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
// But since the CAS() this thread may have also stored into _succ
// or entry_list. These meta-data updates must be visible __before
// this thread subsequently drops the lock.
// Consider what could occur if we didn't enforce this constraint --
// STs to monitor meta-data and user-data could reorder with (become
// visible after) the ST in exit that drops ownership of the lock.
// Some other thread could then acquire the lock, but observe inconsistent
// or old monitor meta-data and heap data. That violates the JMM.
// To that end, the exit() operation must have at least STST|LDST
// "release" barrier semantics. Specifically, there must be at least a
// STST|LDST barrier in exit() before the ST of null into _owner that drops
// the lock. The barrier ensures that changes to monitor meta-data and data
// protected by the lock will be visible before we release the lock, and
// therefore before some other thread (CPU) has a chance to acquire the lock.
// See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
//
// Critically, any prior STs to _succ or entry_list must be visible before
// the ST of null into _owner in the *subsequent* (following) corresponding
// monitorexit.

return;
}
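
Two ideas from the function above can be illustrated in isolation: the TATAS try-lock mentioned at the top, and the release-barrier constraint on exit() spelled out just before the return. A hedged analogue using std::atomic; ToyMonitor and its members are illustrative names, not HotSpot API:

#include <atomic>

struct ToyMonitor {
  std::atomic<void*> owner{nullptr};

  // TATAS ("test-and-test-and-set"): read first so contended spinning stays
  // in the cache, and only attempt the serializing CAS when the lock looks
  // free. The acquire order pairs with the release store in unlock().
  bool try_lock(void* self) {
    if (owner.load(std::memory_order_relaxed) != nullptr) {
      return false;                      // test ...
    }
    void* expected = nullptr;            // ... then test-and-set
    return owner.compare_exchange_strong(expected, self,
                                         std::memory_order_acquire);
  }

  // The exit-side constraint discussed above: the release store keeps all
  // prior writes to monitor metadata and protected data visible before any
  // thread can observe owner == nullptr and take the lock.
  void unlock() {
    owner.store(nullptr, std::memory_order_release);
  }
};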

// reenter_internal() is a specialized inline form of the latter half of the
// contended slow-path from enter_internal(). We use reenter_internal() only for
// monitor reentry in wait().
//
// In the future we should reconcile enter_internal() and reenter_internal().

void ObjectMonitor::reenter_internal(JavaThread* current, ObjectWaiter* currentNode) {
assert(current != nullptr, "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
assert(currentNode != nullptr, "invariant");
assert(currentNode->_thread == current, "invariant");
assert(_waiters > 0, "invariant");
assert_mark_word_consistency();

for (;;) {
ObjectWaiter::TStates v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
assert(!has_owner(current), "invariant");

// This thread has been notified so try to reacquire the lock.
if (try_lock(current) == TryLockResult::Success) {
break;
if (enterPath) {
// Egress :
// Current has acquired the lock -- Unlink current from the _entry_list.
unlink_after_acquire(current, &node);
if (has_successor(current)) {
clear_successor();
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
}

// If that fails, spin again. Note that spin count may be zero so the above TryLock
// is necessary.
if (try_spin(current)) {
break;
}

{
OSThreadContendState osts(current->osthread());

assert(current->thread_state() == _thread_in_vm, "invariant");

{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
current->_ParkEvent->park();
}
}

// Try again, but just so we distinguish between futile wakeups and
// successful wakeups. The following test isn't algorithmically
// necessary, but it helps us maintain sensible statistics.
if (try_lock(current) == TryLockResult::Success) {
break;
}

// The lock is still contested.
// We've acquired ownership with CAS().
// CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
// But since the CAS() this thread may have also stored into _succ
// or entry_list. These meta-data updates must be visible __before
// this thread subsequently drops the lock.
// Consider what could occur if we didn't enforce this constraint --
// STs to monitor meta-data and user-data could reorder with (become
// visible after) the ST in exit that drops ownership of the lock.
// Some other thread could then acquire the lock, but observe inconsistent
// or old monitor meta-data and heap data. That violates the JMM.
// To that end, the exit() operation must have at least STST|LDST
// "release" barrier semantics. Specifically, there must be at least a
// STST|LDST barrier in exit() before the ST of null into _owner that drops
// the lock. The barrier ensures that changes to monitor meta-data and data
// protected by the lock will be visible before we release the lock, and
// therefore before some other thread (CPU) has a chance to acquire the lock.
// See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
//
// Critically, any prior STs to _succ or entry_list must be visible before
// the ST of null into _owner in the *subsequent* (following) corresponding
// monitorexit.

// Assuming this is not a spurious wakeup we'll normally
// find that _succ == current.
return;
} else {
// Reenter path
// Current has acquired the lock -- Unlink current from the _entry_list.
assert(has_owner(current), "invariant");
assert_mark_word_consistency();
unlink_after_acquire(current, currentNode);
if (has_successor(current)) clear_successor();

// Invariant: after clearing _succ a contending thread
// *must* retry _owner before parking.
OrderAccess::fence();
assert(!has_successor(current), "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of previous case
}

// Current has acquired the lock -- Unlink current from the _entry_list.
assert(has_owner(current), "invariant");
assert_mark_word_consistency();
unlink_after_acquire(current, currentNode);
if (has_successor(current)) clear_successor();
assert(!has_successor(current), "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of enter_internal()
}
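
Both park paths rely on the Dekker/Lamport duality called out twice above: publish yourself on _entry_list (the CAS supplies the ST plus MEMBAR), then re-sample the owner before parking, since an exit() that raced with the enqueue would otherwise strand the waiter. A hedged standalone analogue; all names are hypothetical:

#include <atomic>

struct ToyWaiter {
  std::atomic<ToyWaiter*> next{nullptr};
  // park event, thread pointer, etc. elided
};

struct ToyQueueMonitor {
  std::atomic<void*>      owner{nullptr};
  std::atomic<ToyWaiter*> entry_list{nullptr};

  // Treiber-style push of self; the seq_cst CAS doubles as the ST;MEMBAR
  // half of the duality, ordering the publication before later loads.
  void enqueue(ToyWaiter* self) {
    ToyWaiter* head = entry_list.load(std::memory_order_relaxed);
    do {
      self->next.store(head, std::memory_order_relaxed);
    } while (!entry_list.compare_exchange_weak(head, self,
                                               std::memory_order_seq_cst,
                                               std::memory_order_relaxed));
  }

  // The LD half: after enqueueing (and after clearing a successor hint),
  // the waiter must re-check the owner and retry the lock before parking.
  bool lock_released_while_enqueueing() const {
    return owner.load(std::memory_order_seq_cst) == nullptr;
  }
};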

// This method is called from two places:
@@ -1933,7 +1923,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
enter(current);
} else {
guarantee(v == ObjectWaiter::TS_ENTER, "invariant");
reenter_internal(current, &node);
enter_internal(current, &node);
node.wait_reenter_end(this);
}
