Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-18 01:54:47 +02:00
8132849: Increased stop time in cleanup phase because of single-threaded walk of thread stacks in NMethodSweeper::mark_active_nmethods()
Reviewed-by: eosterlund, zgu, thartmann
This commit is contained in:
parent 7c9ab50d06
commit c87e7672a6

3 changed files with 99 additions and 22 deletions
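
The change below moves the walk of Java thread stacks out of the single-threaded safepoint cleanup path: NMethodSweeper::mark_active_nmethods() now hands the walk to the heap's safepoint WorkGang when one is available, and with -XX:+ThreadLocalHandshakes the sweeper thread drives the scan through handshakes instead of a VM_MarkActiveNMethods operation. The gang workers all go through Threads::possibly_parallel_threads_do(), which relies on per-thread claiming so that every stack is scanned by exactly one worker. The snippet below is a minimal, self-contained sketch of that claiming idea, using invented MockThread/scan_stacks names; it is plain C++ for illustration, not HotSpot code.

// Simplified illustration (not HotSpot code) of the claiming scheme behind
// Threads::possibly_parallel_threads_do(): every worker walks the full thread
// list, but an atomic claim token guarantees each thread's stack is scanned by
// exactly one worker, so the work is partitioned without any explicit queue.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct MockThread {
  int id = 0;
  std::atomic<int> claimed{0};   // stands in for the per-thread claim parity/token
};

// Each worker iterates the whole list; only the winner of the
// compare-exchange processes a given thread.
static void scan_stacks(std::vector<MockThread>& threads, int worker_id) {
  for (MockThread& t : threads) {
    int expected = 0;
    if (t.claimed.compare_exchange_strong(expected, 1)) {
      // In HotSpot this is where jt->nmethods_do(cl) would mark activations of
      // not-entrant nmethods found in this thread's stack frames.
      std::printf("worker %d scans thread %d\n", worker_id, t.id);
    }
  }
}

int main() {
  std::vector<MockThread> threads(8);
  for (int i = 0; i < 8; i++) threads[i].id = i;

  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back([&threads, w] { scan_stacks(threads, w); });
  }
  for (std::thread& w : workers) w.join();
  return 0;
}
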
@@ -598,7 +598,8 @@ private:
 
 public:
   ParallelSPCleanupThreadClosure(DeflateMonitorCounters* counters) :
-    _nmethod_cl(NMethodSweeper::prepare_mark_active_nmethods()), _counters(counters) {}
+    _nmethod_cl(UseCodeAging ? NMethodSweeper::prepare_reset_hotness_counters() : NULL),
+    _counters(counters) {}
 
   void do_thread(Thread* thread) {
     ObjectSynchronizer::deflate_thread_local_monitors(thread, _counters);
@@ -28,15 +28,19 @@
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "compiler/compileBroker.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "oops/method.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
@@ -197,6 +201,38 @@ bool NMethodSweeper::wait_for_stack_scanning() {
   return _current.end();
 }
 
+class NMethodMarkingThreadClosure : public ThreadClosure {
+private:
+  CodeBlobClosure* _cl;
+public:
+  NMethodMarkingThreadClosure(CodeBlobClosure* cl) : _cl(cl) {}
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread() && ! thread->is_Code_cache_sweeper_thread()) {
+      JavaThread* jt = (JavaThread*) thread;
+      jt->nmethods_do(_cl);
+    }
+  }
+};
+
+class NMethodMarkingTask : public AbstractGangTask {
+private:
+  NMethodMarkingThreadClosure* _cl;
+public:
+  NMethodMarkingTask(NMethodMarkingThreadClosure* cl) :
+    AbstractGangTask("Parallel NMethod Marking"),
+    _cl(cl) {
+    Threads::change_thread_claim_parity();
+  }
+
+  ~NMethodMarkingTask() {
+    Threads::assert_all_threads_claimed();
+  }
+
+  void work(uint worker_id) {
+    Threads::possibly_parallel_threads_do(true, _cl);
+  }
+};
+
 /**
  * Scans the stacks of all Java threads and marks activations of not-entrant methods.
  * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
@@ -205,12 +241,56 @@ bool NMethodSweeper::wait_for_stack_scanning() {
 void NMethodSweeper::mark_active_nmethods() {
   CodeBlobClosure* cl = prepare_mark_active_nmethods();
   if (cl != NULL) {
-    Threads::nmethods_do(cl);
+    WorkGang* workers = Universe::heap()->get_safepoint_workers();
+    if (workers != NULL) {
+      NMethodMarkingThreadClosure tcl(cl);
+      NMethodMarkingTask task(&tcl);
+      workers->run_task(&task);
+    } else {
+      Threads::nmethods_do(cl);
+    }
   }
 }
 
 CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+#ifdef ASSERT
+  if (ThreadLocalHandshakes) {
+    assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
+    assert_lock_strong(CodeCache_lock);
+  } else {
+    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+  }
+#endif
 
   // If we do not want to reclaim not-entrant or zombie methods there is no need
   // to scan stacks
+  if (!MethodFlushing) {
+    return NULL;
+  }
+
+  // Increase time so that we can estimate when to invoke the sweeper again.
+  _time_counter++;
+
+  // Check for restart
+  assert(_current.method() == NULL, "should only happen between sweeper cycles");
+  assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
+
+  _seen = 0;
+  _current = CompiledMethodIterator();
+  // Initialize to first nmethod
+  _current.next();
+  _traversals += 1;
+  _total_time_this_sweep = Tickspan();
+
+  if (PrintMethodFlushing) {
+    tty->print_cr("### Sweep: stack traversal %ld", _traversals);
+  }
+  return &mark_activation_closure;
+}
+
+CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+
+  // If we do not want to reclaim not-entrant or zombie methods there is no need
+  // to scan stacks
+  if (!MethodFlushing) {
@@ -231,24 +311,7 @@ CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
     }
   }
 
-  if (wait_for_stack_scanning()) {
-    _seen = 0;
-    _current = CompiledMethodIterator();
-    // Initialize to first nmethod
-    _current.next();
-    _traversals += 1;
-    _total_time_this_sweep = Tickspan();
-
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Sweep: stack traversal %ld", _traversals);
-    }
-    return &mark_activation_closure;
-
-  } else {
-    // Only set hotness counter
-    return &set_hotness_closure;
-  }
-
+  return &set_hotness_closure;
 }
 
 /**
@@ -258,8 +321,20 @@ CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
 void NMethodSweeper::do_stack_scanning() {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
   if (wait_for_stack_scanning()) {
-    VM_MarkActiveNMethods op;
-    VMThread::execute(&op);
+    if (ThreadLocalHandshakes) {
+      CodeBlobClosure* code_cl;
+      {
+        MutexLockerEx ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        code_cl = prepare_mark_active_nmethods();
+      }
+      if (code_cl != NULL) {
+        NMethodMarkingThreadClosure tcl(code_cl);
+        Handshake::execute(&tcl);
+      }
+    } else {
+      VM_MarkActiveNMethods op;
+      VMThread::execute(&op);
+    }
     _should_sweep = true;
   }
 }
@@ -117,6 +117,7 @@ class NMethodSweeper : public AllStatic {
 
   static void mark_active_nmethods();      // Invoked at the end of each safepoint
   static CodeBlobClosure* prepare_mark_active_nmethods();
+  static CodeBlobClosure* prepare_reset_hotness_counters();
   static void sweeper_loop();
   static void notify(int code_blob_type);  // Possibly start the sweeper thread.
   static void force_sweep();
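
For context on the machinery the new NMethodMarkingTask relies on: an AbstractGangTask handed to WorkGang::run_task() has its work(worker_id) invoked once per gang worker, and run_task() returns (roughly speaking) only after every worker has finished. Below is a rough stand-in for that dispatch-and-join contract, again plain illustrative C++ with hypothetical Gang/GangTask names rather than the real WorkGang implementation.

// Rough model (not HotSpot code) of the contract NMethodMarkingTask relies on:
// run_task() runs work(worker_id) once per worker and joins them all.
#include <cstdio>
#include <thread>
#include <vector>

struct GangTask {                       // stand-in for AbstractGangTask
  virtual void work(unsigned worker_id) = 0;
  virtual ~GangTask() = default;
};

struct Gang {                           // stand-in for WorkGang
  unsigned num_workers;
  explicit Gang(unsigned n) : num_workers(n) {}

  void run_task(GangTask* task) {
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < num_workers; i++) {
      workers.emplace_back([task, i] { task->work(i); });
    }
    for (std::thread& w : workers) w.join();   // block until every worker is done
  }
};

struct MarkingTask : GangTask {
  void work(unsigned worker_id) override {
    // The real task calls Threads::possibly_parallel_threads_do(true, _cl) here.
    std::printf("worker %u marking nmethods\n", worker_id);
  }
};

int main() {
  Gang gang(4);
  MarkingTask task;
  gang.run_task(&task);
  return 0;
}

The gang itself comes from Universe::heap()->get_safepoint_workers(), so the patch reuses the collector's safepoint worker threads and falls back to the old serial Threads::nmethods_do() walk when no gang is available.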