commit b9873e1833 (parent a2f651904d)

8253180: ZGC: Implementation of JEP 376: ZGC: Concurrent Thread-Stack Processing
Reviewed-by: stefank, pliden, rehn, neliasso, coleenp, smonteith

131 changed files with 2428 additions and 572 deletions
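Note: the theme running through the hunks below is that every platform stops
dereferencing a per-thread polling page and instead tests a per-thread polling
word, which lets a poll at a return site double as a stack watermark check.
A minimal conceptual sketch of that scheme (not JDK code; the names and the
exact encoding of the word are illustrative assumptions):

    // Conceptual sketch only. Assumes the polling word doubles as a stack
    // watermark: the low bit arms ordinary polls, and the word's value bounds
    // which stack addresses are currently safe to return into.
    #include <cstdint>

    struct ThreadLocalPoll {
      uintptr_t polling_word; // armed bit + watermark
    };

    // Loop/dispatch polls only test the armed bit (the tbnz/testb/z_tm/andi_
    // instructions in the diff).
    inline bool poll_armed(const ThreadLocalPoll& t) {
      return (t.polling_word & 1) != 0;
    }

    // Return polls compare the stack pointer against the watermark (the
    // "cmp sp, rscratch1" / "cmpq ... rsp" sequences in the diff); returning
    // into an unprocessed frame takes the slow path so it can be fixed first.
    inline bool slow_path_at_return(const ThreadLocalPoll& t, uintptr_t sp) {
      return sp > t.polling_word;
    }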
@@ -1966,9 +1966,10 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
   }
 
   if (do_polling() && C->is_method_compilation()) {
-    st->print("# touch polling page\n\t");
-    st->print("ldr rscratch1, [rthread],#polling_page_offset\n\t");
-    st->print("ldr zr, [rscratch1]");
+    st->print("# test polling word\n\t");
+    st->print("ldr rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
+    st->print("cmp sp, rscratch1\n\t");
+    st->print("bhi #slow_path");
   }
 }
 #endif
@@ -1985,7 +1986,13 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   }
 
   if (do_polling() && C->is_method_compilation()) {
-    __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
+    Label dummy_label;
+    Label* code_stub = &dummy_label;
+    if (!C->output()->in_scratch_emit_size()) {
+      code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
+    }
+    __ relocate(relocInfo::poll_return_type);
+    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
   }
 }
 
@@ -38,6 +38,19 @@
 
 #define __ ce->masm()->
 
+void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
+  __ adr(rscratch1, safepoint_pc);
+  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
+
+  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+         "polling page return stub not created yet");
+  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
+
+  __ far_jump(RuntimeAddress(stub));
+}
+
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   Metadata *m = _method->as_constant_ptr()->as_metadata();
@@ -504,7 +504,7 @@ void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
   }
 }
 
-void LIR_Assembler::return_op(LIR_Opr result) {
+void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 
   // Pop the stack before the safepoint code
@@ -514,7 +514,9 @@ void LIR_Assembler::return_op(LIR_Opr result) {
     __ reserved_stack_check();
   }
 
-  __ fetch_and_read_polling_page(rscratch1, relocInfo::poll_return_type);
+  code_stub->set_safepoint_offset(__ offset());
+  __ relocate(relocInfo::poll_return_type);
+  __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
   __ ret(lr);
 }
 
@@ -0,0 +1,46 @@ (new file)
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+#include "opto/output.hpp"
+#include "runtime/sharedRuntime.hpp"
+
+#define __ masm.
+void C2SafepointPollStubTable::emit_stub_impl(MacroAssembler& masm, C2SafepointPollStub* entry) const {
+  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+         "polling page return stub not created yet");
+  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
+
+  RuntimeAddress callback_addr(stub);
+
+  __ bind(entry->_stub_label);
+  InternalAddress safepoint_pc(masm.pc() - masm.offset() + entry->_safepoint_offset);
+  __ adr(rscratch1, safepoint_pc);
+  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
+  __ far_jump(callback_addr);
+}
+#undef __
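Note on the stub above: "masm.pc() - masm.offset()" is the start of the code
buffer, so adding the recorded safepoint offset reconstructs the absolute
address of the poll instruction; that address is stored in saved_exception_pc
so the shared polling-page return handler can resume there. A small sketch of
the arithmetic (assumed semantics of pc()/offset(), not JDK code):

    #include <cstdint>

    // code_begin = current emit address minus current offset in the buffer;
    // the poll's absolute pc is code_begin plus its recorded offset.
    inline uintptr_t safepoint_pc(uintptr_t current_pc,
                                  uintptr_t current_offset,
                                  uintptr_t safepoint_offset) {
      uintptr_t code_begin = current_pc - current_offset;
      return code_begin + safepoint_offset;
    }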
@@ -37,6 +37,7 @@
 #include "runtime/monitorChunk.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "vmreg_aarch64.inline.hpp"
@@ -476,8 +477,8 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
 }
 
 //------------------------------------------------------------------------------
-// frame::sender
-frame frame::sender(RegisterMap* map) const {
+// frame::sender_raw
+frame frame::sender_raw(RegisterMap* map) const {
   // Default is we done have to follow them. The sender_for_xxx will
   // update it accordingly
   map->set_include_argument_oops(false);
@@ -499,6 +500,16 @@ frame frame::sender(RegisterMap* map) const {
   return frame(sender_sp(), link(), sender_pc());
 }
 
+frame frame::sender(RegisterMap* map) const {
+  frame result = sender_raw(map);
+
+  if (map->process_frames()) {
+    StackWatermarkSet::on_iteration(map->thread(), result);
+  }
+
+  return result;
+}
+
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
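Note: the split above is the hook that keeps stack walkers safe. Ordinary
walks go through sender(), which applies the stack watermark barrier per
visited frame; code that must not trigger barriers (the watermark machinery
itself) uses sender_raw(). A compact, self-contained sketch of the wrapper
pattern with illustrative stand-in types:

    struct Frame {};
    struct Map { bool process_frames; };

    // stand-ins for frame::sender_raw and StackWatermarkSet::on_iteration
    Frame sender_raw(Map&) { return Frame{}; }
    void on_iteration(Map&, const Frame&) { /* fix up frame if needed */ }

    Frame sender(Map& map) {
      Frame result = sender_raw(map);
      if (map.process_frames) {
        on_iteration(map, result); // barrier applied once per visited frame
      }
      return result;
    }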
@@ -161,4 +161,7 @@
 
   static jint interpreter_frame_expression_stack_direction() { return -1; }
 
+  // returns the sending frame, without applying any barriers
+  frame sender_raw(RegisterMap* map) const;
+
 #endif // CPU_AARCH64_FRAME_AARCH64_HPP
@@ -24,10 +24,9 @@
 #ifndef CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
 #define CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
 
 const size_t ZPlatformGranuleSizeShift = 21; // 2MB
 const size_t ZPlatformHeapViews = 3;
-const size_t ZPlatformNMethodDisarmedOffset = 4;
 const size_t ZPlatformCacheLineSize = 64;
 
 size_t ZPlatformAddressOffsetBits();
 size_t ZPlatformAddressMetadataShift();
@@ -473,7 +473,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
 
   if (needs_thread_local_poll) {
     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
-    ldr(rscratch2, Address(rthread, Thread::polling_page_offset()));
+    ldr(rscratch2, Address(rthread, Thread::polling_word_offset()));
     tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
   }
 
@@ -521,6 +521,7 @@ void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 
 // remove activation
 //
+// Apply stack watermark barrier.
 // Unlock the receiver if this is a synchronized method.
 // Unlock any Java monitors from syncronized blocks.
 // Remove the activation from the stack.
@@ -541,6 +542,19 @@ void InterpreterMacroAssembler::remove_activation(
   // result check if synchronized method
   Label unlocked, unlock, no_unlock;
 
+  // The below poll is for the stack watermark barrier. It allows fixing up frames lazily,
+  // that would normally not be safe to use. Such bad returns into unsafe territory of
+  // the stack, will call InterpreterRuntime::at_unwind.
+  Label slow_path;
+  Label fast_path;
+  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
+  br(Assembler::AL, fast_path);
+  bind(slow_path);
+  push(state);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind));
+  pop(state);
+  bind(fast_path);
+
   // get the value of _do_not_unlock_if_synchronized into r3
   const Address do_not_unlock_if_synchronized(rthread,
         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
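Note: with this poll, a method return in the interpreter that would land in a
not-yet-processed caller frame first traps into InterpreterRuntime::at_unwind,
which fixes the caller frame up before any monitor unlocking touches it. A
control-flow sketch (illustrative, not JDK code):

    // slow == the at_return watermark check above failed
    void remove_activation_sketch(bool slow) {
      if (slow) {
        // push(state); call_VM(InterpreterRuntime::at_unwind); pop(state);
        // i.e. let the VM process the caller frame before unwinding into it
      }
      // fast path: check _do_not_unlock_if_synchronized, unlock monitors,
      // then remove the activation
    }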
@@ -288,27 +288,21 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
   return address(((uint64_t)insn_addr + (offset << 2)));
 }
 
-void MacroAssembler::safepoint_poll(Label& slow_path) {
-  ldr(rscratch1, Address(rthread, Thread::polling_page_offset()));
-  tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
-}
-
-// Just like safepoint_poll, but use an acquiring load for thread-
-// local polling.
-//
-// We need an acquire here to ensure that any subsequent load of the
-// global SafepointSynchronize::_state flag is ordered after this load
-// of the local Thread::_polling page. We don't want this poll to
-// return false (i.e. not safepointing) and a later poll of the global
-// SafepointSynchronize::_state spuriously to return true.
-//
-// This is to avoid a race when we're in a native->Java transition
-// racing the code which wakes up from a safepoint.
-//
-void MacroAssembler::safepoint_poll_acquire(Label& slow_path) {
-  lea(rscratch1, Address(rthread, Thread::polling_page_offset()));
-  ldar(rscratch1, rscratch1);
-  tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
+  if (acquire) {
+    lea(rscratch1, Address(rthread, Thread::polling_word_offset()));
+    ldar(rscratch1, rscratch1);
+  } else {
+    ldr(rscratch1, Address(rthread, Thread::polling_word_offset()));
+  }
+  if (at_return) {
+    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
+    // we may safely use the sp instead to perform the stack watermark check.
+    cmp(in_nmethod ? sp : rfp, rscratch1);
+    br(Assembler::HI, slow_path);
+  } else {
+    tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+  }
 }
 
 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
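Note: the acquire variant above (ldar) exists so that a later load of the
global SafepointSynchronize::_state cannot be reordered before the poll, as
the comments retained in sharedRuntime explain. In C++ memory-model terms the
distinction looks roughly like this (illustrative sketch, not JDK code):

    #include <atomic>
    #include <cstdint>

    bool poll_armed(const std::atomic<uintptr_t>& polling_word, bool acquire) {
      uintptr_t w = polling_word.load(acquire ? std::memory_order_acquire
                                              : std::memory_order_relaxed);
      return (w & 1) != 0; // SafepointMechanism::poll_bit()
    }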
@@ -4405,13 +4399,6 @@ void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype)
   ldr(dest, Address(rthread, Thread::polling_page_offset()));
 }
 
-// Move the address of the polling page into r, then read the polling
-// page.
-address MacroAssembler::fetch_and_read_polling_page(Register r, relocInfo::relocType rtype) {
-  get_polling_page(r, rtype);
-  return read_polling_page(r, rtype);
-}
-
 // Read the polling page. The address of the polling page must
 // already be in r.
 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
@@ -102,8 +102,7 @@ class MacroAssembler: public Assembler {
   virtual void check_and_handle_popframe(Register java_thread);
   virtual void check_and_handle_earlyret(Register java_thread);
 
-  void safepoint_poll(Label& slow_path);
-  void safepoint_poll_acquire(Label& slow_path);
+  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
 
   // Biased locking support
   // lock_reg and obj_reg must be loaded up with the appropriate values.

@@ -1231,7 +1230,6 @@ public:
 
   address read_polling_page(Register r, relocInfo::relocType rtype);
   void get_polling_page(Register dest, relocInfo::relocType rtype);
-  address fetch_and_read_polling_page(Register r, relocInfo::relocType rtype);
 
   // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
   void update_byte_crc32(Register crc, Register val, Register table);
@@ -1877,7 +1877,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // check for safepoint operation in progress and/or pending suspend requests
   Label safepoint_in_progress, safepoint_in_progress_done;
   {
-    __ safepoint_poll_acquire(safepoint_in_progress);
+    // We need an acquire here to ensure that any subsequent load of the
+    // global SafepointSynchronize::_state flag is ordered after this load
+    // of the thread-local polling word. We don't want this poll to
+    // return false (i.e. not safepointing) and a later poll of the global
+    // SafepointSynchronize::_state spuriously to return true.
+    //
+    // This is to avoid a race when we're in a native->Java transition
+    // racing the code which wakes up from a safepoint.
+
+    __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
     __ cbnzw(rscratch1, safepoint_in_progress);
     __ bind(safepoint_in_progress_done);
@@ -980,7 +980,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 
     Label slow_path;
     // If we need a safepoint check, generate full interpreter entry.
-    __ safepoint_poll(slow_path);
+    __ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.

@@ -1029,7 +1029,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
 
     Label slow_path;
     // If we need a safepoint check, generate full interpreter entry.
-    __ safepoint_poll(slow_path);
+    __ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.
@@ -1388,7 +1388,16 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // check for safepoint operation in progress and/or pending suspend requests
   {
     Label L, Continue;
-    __ safepoint_poll_acquire(L);
+
+    // We need an acquire here to ensure that any subsequent load of the
+    // global SafepointSynchronize::_state flag is ordered after this load
+    // of the thread-local polling word. We don't want this poll to
+    // return false (i.e. not safepointing) and a later poll of the global
+    // SafepointSynchronize::_state spuriously to return true.
+    //
+    // This is to avoid a race when we're in a native->Java transition
+    // racing the code which wakes up from a safepoint.
+    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
     __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
     __ cbz(rscratch2, Continue);
     __ bind(L);
@@ -128,6 +128,7 @@ public:
   static int get_initial_sve_vector_length() { return _initial_sve_vector_length; };
 
   static bool supports_fast_class_init_checks() { return true; }
+  constexpr static bool supports_stack_watermark_barrier() { return true; }
 };
 
 #endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP
@@ -38,6 +38,10 @@
 
 #define __ ce->masm()->
 
+void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
+  ShouldNotReachHere();
+}
+
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_bci, 0);
@@ -283,7 +283,7 @@ int LIR_Assembler::emit_deopt_handler() {
 }
 
 
-void LIR_Assembler::return_op(LIR_Opr result) {
+void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   // Pop the frame before safepoint polling
   __ remove_frame(initial_frame_size_in_bytes());
   __ read_polling_page(Rtemp, relocInfo::poll_return_type);
@@ -580,7 +580,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
 
   if (needs_thread_local_poll) {
     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
-    ldr(Rtemp, Address(Rthread, Thread::polling_page_offset()));
+    ldr(Rtemp, Address(Rthread, Thread::polling_word_offset()));
     tbnz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), safepoint);
   }
 
@@ -1914,7 +1914,7 @@ void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
 }
 
 void MacroAssembler::safepoint_poll(Register tmp1, Label& slow_path) {
-  ldr_u32(tmp1, Address(Rthread, Thread::polling_page_offset()));
+  ldr_u32(tmp1, Address(Rthread, Thread::polling_word_offset()));
   tst(tmp1, exact_log2(SafepointMechanism::poll_bit()));
   b(slow_path, eq);
 }
@@ -38,6 +38,9 @@
 
 #define __ ce->masm()->
 
+void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
+  ShouldNotReachHere();
+}
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
   : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
@@ -1324,7 +1324,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 }
 
 
-void LIR_Assembler::return_op(LIR_Opr result) {
+void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page = R12;
 
@@ -223,7 +223,7 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
   address *sfpt_tbl = Interpreter::safept_table(state);
   if (table != sfpt_tbl) {
     Label dispatch;
-    ld(R0, in_bytes(Thread::polling_page_offset()), R16_thread);
+    ld(R0, in_bytes(Thread::polling_word_offset()), R16_thread);
     // Armed page has poll_bit set, if poll bit is cleared just continue.
     andi_(R0, R0, SafepointMechanism::poll_bit());
     beq(CCR0, dispatch);
@@ -3044,7 +3044,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
 }
 
 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
-  ld(temp_reg, in_bytes(Thread::polling_page_offset()), R16_thread);
+  ld(temp_reg, in_bytes(Thread::polling_word_offset()), R16_thread);
   // Armed page has poll_bit set.
   andi_(temp_reg, temp_reg, SafepointMechanism::poll_bit());
   bne(CCR0, slow_path);
@@ -2173,7 +2173,7 @@ void TemplateTable::_return(TosState state) {
 
   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
     Label no_safepoint;
-    __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread);
+    __ ld(R11_scratch1, in_bytes(Thread::polling_word_offset()), R16_thread);
     __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
     __ beq(CCR0, no_safepoint);
     __ push(state);
@@ -41,6 +41,10 @@
 #undef CHECK_BAILOUT
 #define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
 
+void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
+  ShouldNotReachHere();
+}
+
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
   : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
@@ -1207,7 +1207,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
 }
 
 
-void LIR_Assembler::return_op(LIR_Opr result) {
+void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   assert(result->is_illegal() ||
          (result->is_single_cpu() && result->as_register() == Z_R2) ||
          (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
@@ -121,7 +121,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bo
   address *sfpt_tbl = Interpreter::safept_table(state);
   if (table != sfpt_tbl) {
     Label dispatch;
-    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
+    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
     // Armed page has poll_bit set, if poll bit is cleared just continue.
     z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
     z_braz(dispatch);
|
@ -2680,7 +2680,7 @@ uint MacroAssembler::get_poll_register(address instr_loc) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
|
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
|
||||||
const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
|
const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
|
||||||
// Armed page has poll_bit set.
|
// Armed page has poll_bit set.
|
||||||
z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
|
z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
|
||||||
z_brnaz(slow_path);
|
z_brnaz(slow_path);
|
||||||
|
|
|
@@ -2377,7 +2377,7 @@ void TemplateTable::_return(TosState state) {
 
   if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
     Label no_safepoint;
-    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
+    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
     __ z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
     __ z_braz(no_safepoint);
     __ push(state);
@@ -79,6 +79,22 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
 }
 #endif // !_LP64
 
+void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
+#ifdef _LP64
+  __ bind(_entry);
+  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
+  __ lea(rscratch1, safepoint_pc);
+  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
+
+  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+         "polling page return stub not created yet");
+  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
+  __ jump(RuntimeAddress(stub));
+#else
+  ShouldNotReachHere();
+#endif /* _LP64 */
+}
+
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   Metadata *m = _method->as_constant_ptr()->as_metadata();
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
@@ -517,8 +518,7 @@ int LIR_Assembler::emit_deopt_handler() {
   return offset;
 }
 
-void LIR_Assembler::return_op(LIR_Opr result) {
+void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
     assert(result->fpu() == 0, "result must already be on TOS");
@@ -531,22 +531,21 @@ void LIR_Assembler::return_op(LIR_Opr result) {
     __ reserved_stack_check();
   }
 
-  bool result_is_oop = result->is_valid() ? result->is_oop() : false;
-
   // Note: we do not need to round double result; float result has the right precision
   // the poll sets the condition code, but no data registers
 
 #ifdef _LP64
-  const Register poll_addr = rscratch1;
-  __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
+  code_stub->set_safepoint_offset(__ offset());
+  __ relocate(relocInfo::poll_return_type);
+  __ safepoint_poll(*code_stub->entry(), r15_thread, true /* at_return */, true /* in_nmethod */);
 #else
   const Register poll_addr = rbx;
   assert(FrameMap::is_caller_save_register(poll_addr), "will overwrite");
   __ get_thread(poll_addr);
   __ movptr(poll_addr, Address(poll_addr, Thread::polling_page_offset()));
-#endif
   __ relocate(relocInfo::poll_return_type);
   __ testl(rax, Address(poll_addr, 0));
+#endif
   __ ret(0);
 }
 
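Note: the C1 flow above is a two-step handshake. return_op() records the
code-buffer offset of the poll into the C1SafepointPollStub before emitting
it; when the stub body is emitted later, it turns that offset back into an
absolute pc and stores it as the resume address. Sketch of the handshake
(illustrative types, not JDK code):

    #include <cstdint>

    struct PollStubSketch {
      uintptr_t safepoint_offset = 0;                // set by return_op
      void set_safepoint_offset(uintptr_t o) { safepoint_offset = o; }
      uintptr_t resume_pc(uintptr_t code_begin) const {
        return code_begin + safepoint_offset;       // used by emit_code
      }
    };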
src/hotspot/cpu/x86/c2_safepointPollStubTable_x86.cpp (new file, 46 lines)
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+#include "opto/output.hpp"
+#include "runtime/sharedRuntime.hpp"
+
+#define __ masm.
+void C2SafepointPollStubTable::emit_stub_impl(MacroAssembler& masm, C2SafepointPollStub* entry) const {
+  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+         "polling page return stub not created yet");
+  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
+
+  RuntimeAddress callback_addr(stub);
+
+  __ bind(entry->_stub_label);
+  InternalAddress safepoint_pc(masm.pc() - masm.offset() + entry->_safepoint_offset);
+  __ lea(rscratch1, safepoint_pc);
+  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
+  __ jump(callback_addr);
+}
+#undef __
@@ -36,6 +36,7 @@
 #include "runtime/monitorChunk.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "vmreg_x86.inline.hpp"
@@ -469,8 +470,8 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
 
 
 //------------------------------------------------------------------------------
-// frame::sender
-frame frame::sender(RegisterMap* map) const {
+// frame::sender_raw
+frame frame::sender_raw(RegisterMap* map) const {
   // Default is we done have to follow them. The sender_for_xxx will
   // update it accordingly
   map->set_include_argument_oops(false);
@@ -487,6 +488,16 @@ frame frame::sender(RegisterMap* map) const {
   return frame(sender_sp(), link(), sender_pc());
 }
 
+frame frame::sender(RegisterMap* map) const {
+  frame result = sender_raw(map);
+
+  if (map->process_frames()) {
+    StackWatermarkSet::on_iteration(map->thread(), result);
+  }
+
+  return result;
+}
+
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
@@ -156,4 +156,7 @@
 
   static jint interpreter_frame_expression_stack_direction() { return -1; }
 
+  // returns the sending frame, without applying any barriers
+  frame sender_raw(RegisterMap* map) const;
+
 #endif // CPU_X86_FRAME_X86_HPP
@@ -24,10 +24,9 @@
 #ifndef CPU_X86_GC_Z_ZGLOBALS_X86_HPP
 #define CPU_X86_GC_Z_ZGLOBALS_X86_HPP
 
 const size_t ZPlatformGranuleSizeShift = 21; // 2MB
 const size_t ZPlatformHeapViews = 3;
-const size_t ZPlatformNMethodDisarmedOffset = 4;
 const size_t ZPlatformCacheLineSize = 64;
 
 size_t ZPlatformAddressOffsetBits();
 size_t ZPlatformAddressMetadataShift();
@@ -853,7 +853,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
   Label no_safepoint, dispatch;
   if (table != safepoint_table && generate_poll) {
     NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
-    testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+    testb(Address(r15_thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
 
     jccb(Assembler::zero, no_safepoint);
     lea(rscratch1, ExternalAddress((address)safepoint_table));

@@ -872,7 +872,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
   Label no_safepoint;
   const Register thread = rcx;
   get_thread(thread);
-  testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+  testb(Address(thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
 
   jccb(Assembler::zero, no_safepoint);
   ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);

@@ -961,6 +961,7 @@ void InterpreterMacroAssembler::narrow(Register result) {
 
 // remove activation
 //
+// Apply stack watermark barrier.
 // Unlock the receiver if this is a synchronized method.
 // Unlock any Java monitors from syncronized blocks.
 // Remove the activation from the stack.
@@ -989,6 +990,19 @@ void InterpreterMacroAssembler::remove_activation(
   // because rdx may have the result in it
   NOT_LP64(get_thread(rcx);)
 
+  // The below poll is for the stack watermark barrier. It allows fixing up frames lazily,
+  // that would normally not be safe to use. Such bad returns into unsafe territory of
+  // the stack, will call InterpreterRuntime::at_unwind.
+  Label slow_path;
+  Label fast_path;
+  safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
+  jmp(fast_path);
+  bind(slow_path);
+  push(state);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind));
+  pop(state);
+  bind(fast_path);
+
   // get the value of _do_not_unlock_if_synchronized into rdx
   const Address do_not_unlock_if_synchronized(rthread,
     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
@@ -2699,16 +2699,17 @@ void MacroAssembler::save_rax(Register tmp) {
   else if (tmp != rax) mov(tmp, rax);
 }
 
-void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
+void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) {
 #ifdef _LP64
-  assert(thread_reg == r15_thread, "should be");
-#else
-  if (thread_reg == noreg) {
-    thread_reg = temp_reg;
-    get_thread(thread_reg);
+  if (at_return) {
+    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
+    // we may safely use rsp instead to perform the stack watermark check.
+    cmpq(Address(thread_reg, Thread::polling_word_offset()), in_nmethod ? rsp : rbp);
+    jcc(Assembler::above, slow_path);
+    return;
   }
 #endif
-  testb(Address(thread_reg, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+  testb(Address(thread_reg, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
   jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
 }
 
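Note: as the retained comment says, an nmethod pops its frame before polling,
so rsp already points at the caller's territory and can stand in for "the
frame being returned into"; in the interpreter the frame is still live, so
rbp bounds it instead. The predicate being evaluated is conceptually the
following (illustrative sketch; the emitted compare encodes the same test):

    #include <cstdint>

    inline bool slow_path(uintptr_t polling_word, uintptr_t rsp,
                          uintptr_t rbp, bool in_nmethod) {
      uintptr_t top = in_nmethod ? rsp : rbp; // stack extent being returned into
      return top > polling_word;              // unprocessed side of the watermark
    }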
@@ -647,9 +647,7 @@ class MacroAssembler: public Assembler {
                 Register tmp,
                 int offset);
 
-  // If thread_reg is != noreg the code assumes the register passed contains
-  // the thread (required on 64 bit).
-  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
+  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
 
   void verify_tlab();
 
@@ -2237,7 +2237,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // check for safepoint operation in progress and/or pending suspend requests
   { Label Continue, slow_path;
 
-    __ safepoint_poll(slow_path, thread, noreg);
+    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
 
     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
     __ jcc(Assembler::equal, Continue);

@@ -2595,7 +2595,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     Label Continue;
     Label slow_path;
 
-    __ safepoint_poll(slow_path, r15_thread, rscratch1);
+    __ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
 
     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
     __ jcc(Assembler::equal, Continue);
@@ -1106,11 +1106,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
     Label Continue;
     Label slow_path;
 
-#ifndef _LP64
-    __ safepoint_poll(slow_path, thread, noreg);
-#else
-    __ safepoint_poll(slow_path, r15_thread, rscratch1);
-#endif
+    __ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
 
     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
     __ jcc(Assembler::equal, Continue);
@@ -62,7 +62,8 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 
     Label slow_path;
     // If we need a safepoint check, generate full interpreter entry.
-    __ safepoint_poll(slow_path, noreg, rdi);
+    __ get_thread(rdi);
+    __ safepoint_poll(slow_path, rdi, false /* at_return */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.

@@ -111,7 +112,8 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
 
     Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
-    __ safepoint_poll(slow_path, noreg, rdi);
+    __ get_thread(rdi);
+    __ safepoint_poll(slow_path, rdi, false /* at_return */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.
@@ -191,7 +191,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
     // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
 
     Label slow_path;
-    __ safepoint_poll(slow_path, r15_thread, rscratch1);
+    __ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.

@@ -237,7 +237,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
     // r13: senderSP must preserved for slow path, set SP to it on fast path
 
     Label slow_path;
-    __ safepoint_poll(slow_path, r15_thread, rscratch1);
+    __ safepoint_poll(slow_path, r15_thread, false /* at_return */, false /* in_nmethod */);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.
@@ -2658,16 +2658,16 @@ void TemplateTable::_return(TosState state) {
     Label no_safepoint;
     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
 #ifdef _LP64
-    __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+    __ testb(Address(r15_thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
 #else
     const Register thread = rdi;
     __ get_thread(thread);
-    __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+    __ testb(Address(thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
 #endif
     __ jcc(Assembler::zero, no_safepoint);
     __ push(state);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::at_safepoint));
     __ pop(state);
     __ bind(no_safepoint);
   }
@@ -27,6 +27,7 @@
 
 #include "memory/universe.hpp"
 #include "runtime/abstract_vm_version.hpp"
+#include "utilities/macros.hpp"
 
 class VM_Version : public Abstract_VM_Version {
   friend class VMStructs;

@@ -1021,6 +1022,10 @@ public:
     return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32
   }
 
+  constexpr static bool supports_stack_watermark_barrier() {
+    return LP64_ONLY(true) NOT_LP64(false);
+  }
+
   // there are several insns to force cache line sync to memory which
   // we can use to ensure mapped non-volatile memory is up to date with
   // pending in-cache changes.
@@ -932,8 +932,8 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
   st->print_cr("popq rbp");
   if (do_polling() && C->is_method_compilation()) {
     st->print("\t");
-    st->print_cr("movq rscratch1, poll_offset[r15_thread] #polling_page_address\n\t"
-                 "testl rax, [rscratch1]\t"
+    st->print_cr("cmpq poll_offset[r15_thread], rsp\n\t"
+                 "ja #safepoint_stub\t"
                  "# Safepoint: poll for GC");
   }
 }
@@ -980,9 +980,13 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 
   if (do_polling() && C->is_method_compilation()) {
     MacroAssembler _masm(&cbuf);
-    __ movq(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
+    Label dummy_label;
+    Label* code_stub = &dummy_label;
+    if (!C->output()->in_scratch_emit_size()) {
+      code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
+    }
     __ relocate(relocInfo::poll_return_type);
-    __ testl(rax, Address(rscratch1, 0));
+    __ safepoint_poll(*code_stub, r15_thread, true /* at_return */, true /* in_nmethod */);
   }
 }
 
@@ -89,6 +89,28 @@ class CodeStubList: public GrowableArray<CodeStub*> {
   }
 };
 
+class C1SafepointPollStub: public CodeStub {
+ private:
+  uintptr_t _safepoint_offset;
+
+ public:
+  C1SafepointPollStub() :
+      _safepoint_offset(0) {
+  }
+
+  uintptr_t safepoint_offset() { return _safepoint_offset; }
+  void set_safepoint_offset(uintptr_t safepoint_offset) { _safepoint_offset = safepoint_offset; }
+
+  virtual void emit_code(LIR_Assembler* e);
+  virtual void visit(LIR_OpVisitState* visitor) {
+    // don't pass in the code emit info since it's processed in the fast path
+    visitor->do_slow_case();
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("C1SafepointPollStub"); }
+#endif // PRODUCT
+};
+
 class CounterOverflowStub: public CodeStub {
  private:
   CodeEmitInfo* _info;
@@ -23,11 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_InstructionPrinter.hpp"
 #include "c1/c1_LIR.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciInstance.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 
 Register LIR_OprDesc::as_register() const {
|
@@ -447,7 +449,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
     case lir_fld:        // input always valid, result and info always invalid
     case lir_push:       // input always valid, result and info always invalid
     case lir_pop:        // input always valid, result and info always invalid
-    case lir_return:     // input always valid, result and info always invalid
     case lir_leal:       // input and result always valid, info always invalid
     case lir_monaddr:    // input and result always valid, info always invalid
     case lir_null_check: // input and info always valid, result always invalid
@@ -463,6 +464,19 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
       break;
     }
 
+    case lir_return:
+    {
+      assert(op->as_OpReturn() != NULL, "must be");
+      LIR_OpReturn* op_ret = (LIR_OpReturn*)op;
+
+      if (op_ret->_info)               do_info(op_ret->_info);
+      if (op_ret->_opr->is_valid())    do_input(op_ret->_opr);
+      if (op_ret->_result->is_valid()) do_output(op_ret->_result);
+      if (op_ret->stub() != NULL)      do_stub(op_ret->stub());
+
+      break;
+    }
+
     case lir_safepoint:
     {
       assert(op->as_Op1() != NULL, "must be");
@@ -948,6 +962,15 @@ bool LIR_OpVisitState::no_operands(LIR_Op* op) {
 }
 #endif
 
+// LIR_OpReturn
+LIR_OpReturn::LIR_OpReturn(LIR_Opr opr) :
+    LIR_Op1(lir_return, opr, (CodeEmitInfo*)NULL /* info */),
+    _stub(NULL) {
+  if (VM_Version::supports_stack_watermark_barrier()) {
+    _stub = new C1SafepointPollStub();
+  }
+}
+
 //---------------------------------------------------
 
@@ -36,6 +36,7 @@ class LIR_Assembler;
 class CodeEmitInfo;
 class CodeStub;
 class CodeStubList;
+class C1SafepointPollStub;
 class ArrayCopyStub;
 class LIR_Op;
 class ciType;
@@ -856,6 +857,7 @@ class LIR_Op1;
 class LIR_OpBranch;
 class LIR_OpConvert;
 class LIR_OpAllocObj;
+class LIR_OpReturn;
 class LIR_OpRoundFP;
 class LIR_Op2;
 class LIR_OpDelay;
@@ -1116,6 +1118,7 @@ class LIR_Op: public CompilationResourceObj {
   virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
   virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
   virtual LIR_OpBranch* as_OpBranch() { return NULL; }
+  virtual LIR_OpReturn* as_OpReturn() { return NULL; }
   virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
   virtual LIR_OpConvert* as_OpConvert() { return NULL; }
   virtual LIR_Op0* as_Op0() { return NULL; }
@@ -1439,6 +1442,18 @@ class LIR_OpBranch: public LIR_Op {
   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
 };
 
+class LIR_OpReturn: public LIR_Op1 {
+ friend class LIR_OpVisitState;
+
+ private:
+  C1SafepointPollStub* _stub;
+
+ public:
+  LIR_OpReturn(LIR_Opr opr);
+
+  C1SafepointPollStub* stub() const { return _stub; }
+  virtual LIR_OpReturn* as_OpReturn() { return this; }
+};
+
 class ConversionStub;
 
@@ -2094,9 +2109,8 @@ class LIR_List: public CompilationResourceObj {
   void metadata2reg  (Metadata* o, LIR_Opr reg) { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg)); }
   void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
 
-  void return_op(LIR_Opr result) { append(new LIR_Op1(lir_return, result)); }
-
   void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); }
+  void return_op(LIR_Opr result) { append(new LIR_OpReturn(result)); }
 
   void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
@@ -521,9 +521,15 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
       break;
     }
 
-    case lir_return:
-      return_op(op->in_opr());
+    case lir_return: {
+      assert(op->as_OpReturn() != NULL, "sanity");
+      LIR_OpReturn *ret_op = (LIR_OpReturn*)op;
+      return_op(ret_op->in_opr(), ret_op->stub());
+      if (ret_op->stub() != NULL) {
+        append_code_stub(ret_op->stub());
+      }
       break;
+    }
 
     case lir_safepoint:
       if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
@@ -157,7 +157,7 @@ class LIR_Assembler: public CompilationResourceObj {
   // particular sparc uses this for delay slot filling.
   void peephole(LIR_List* list);
 
-  void return_op(LIR_Opr result);
+  void return_op(LIR_Opr result, C1SafepointPollStub* code_stub);
 
   // returns offset of poll instruction
   int safepoint_poll(LIR_Opr result, CodeEmitInfo* info);
@@ -6410,7 +6410,7 @@ void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
       if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
         // replace the jump to a return with a direct return
         // Note: currently the edge between the blocks is not deleted
-        pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
+        pred_instructions->at_put(pred_instructions->length() - 1, new LIR_OpReturn(return_opr));
 #ifdef ASSERT
         return_converted.set_bit(pred->block_id());
 #endif
@@ -63,6 +63,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "runtime/vframeArray.hpp"
@@ -505,6 +506,17 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
   thread->set_is_method_handle_return(false);
 
   Handle exception(thread, ex);
+
+  // This function is called when we are about to throw an exception. Therefore,
+  // we have to poll the stack watermark barrier to make sure that not yet safe
+  // stack frames are made safe before returning into them.
+  if (thread->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
+    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
+    // frame has been unwound. It instead builds its own stub frame, to call the
+    // runtime. But the throwing frame has already been unwound here.
+    StackWatermarkSet::after_unwind(thread);
+  }
+
   nm = CodeCache::find_nmethod(pc);
   assert(nm != NULL, "this is not an nmethod");
   // Adjust the pc as needed/
@@ -2436,10 +2436,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
   // The "ASSERT" here is to verify this method generates the exactly same stack
   // trace as utilizing vframe.
 #ifdef ASSERT
-  vframeStream st(thread);
+  vframeStream st(thread, false /* stop_at_java_call_stub */, false /* process_frames */);
 #endif
   int total_count = 0;
-  RegisterMap map(thread, false);
+  RegisterMap map(thread, false /* update */, false /* process_frames */);
   int decode_offset = 0;
   CompiledMethod* nm = NULL;
   bool skip_fillInStackTrace_check = false;
@@ -2581,7 +2581,7 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
   assert(backtrace.not_null(), "backtrace should have been preallocated");
 
   ResourceMark rm(THREAD);
-  vframeStream st(THREAD);
+  vframeStream st(THREAD, false /* stop_at_java_call_stub */, false /* process_frames */);
 
   BacktraceBuilder bt(THREAD, backtrace);
@@ -37,6 +37,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/lockFreeStack.hpp"
 #ifdef COMPILER1
@@ -189,13 +190,21 @@ void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
   add(map);
 }
 
-static void add_derived_oop(oop* base, oop* derived) {
-#if !defined(TIERED) && !INCLUDE_JVMCI
-  COMPILER1_PRESENT(ShouldNotReachHere();)
-#endif // !defined(TIERED) && !INCLUDE_JVMCI
-#if COMPILER2_OR_JVMCI
+static void add_derived_oop(oop* base, oop* derived, OopClosure* oop_fn) {
   DerivedPointerTable::add(derived, base);
-#endif // COMPILER2_OR_JVMCI
+}
+
+static void ignore_derived_oop(oop* base, oop* derived, OopClosure* oop_fn) {
+}
+
+static void process_derived_oop(oop* base, oop* derived, OopClosure* oop_fn) {
+  // All derived pointers must be processed before the base pointer of any derived pointer is processed.
+  // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
+  // offset, if the base pointer is processed in the first derived pointer.
+  uintptr_t offset = cast_from_oop<uintptr_t>(*derived) - cast_from_oop<uintptr_t>(*base);
+  *derived = *base;
+  oop_fn->do_oop(derived);
+  *derived = cast_to_oop(cast_from_oop<uintptr_t>(*derived) + offset);
 }
 
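process_derived_oop re-derives an interior pointer from its relocated base, which is why every derived pointer must be visited before its base pointer is updated in place. The arithmetic in miniature (standalone C++ with illustrative addresses):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t base    = 0x10000;      // base oop before the GC moves it
      uintptr_t derived = base + 0x28;  // interior pointer into the same object
      uintptr_t offset  = derived - base;

      uintptr_t moved = 0x80000;        // base after relocation (what oop_fn does)
      derived = moved + offset;         // re-derive from the relocated base
      assert(derived == 0x80028);
      return 0;
    }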
@@ -227,14 +236,23 @@ static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
 }
 #endif // PRODUCT
 
-void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
-  // add derived oops to a table
-  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
+void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
+  switch (mode) {
+  case DerivedPointerIterationMode::_directly:
+    all_do(fr, reg_map, f, process_derived_oop, &do_nothing_cl);
+    break;
+  case DerivedPointerIterationMode::_with_table:
+    all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
+    break;
+  case DerivedPointerIterationMode::_ignore:
+    all_do(fr, reg_map, f, ignore_derived_oop, &do_nothing_cl);
+    break;
+  }
 }
 
 
 void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
-                       OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
+                       OopClosure* oop_fn, void derived_oop_fn(oop*, oop*, OopClosure*),
                        OopClosure* value_fn) {
   CodeBlob* cb = fr->cb();
   assert(cb != NULL, "no codeblob");
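The three cases map one-to-one onto the helpers above: _directly re-derives immediately (what concurrent stack processing needs, since no safepoint-wide table fixup will run afterwards), _with_table defers to DerivedPointerTable, and _ignore skips derived pointers. The forward-declared enum presumably reads roughly as follows (a sketch; the declaration itself falls outside the quoted hunks):

    enum class DerivedPointerIterationMode {
      _with_table,  // record in DerivedPointerTable, fix up when the table is flushed
      _directly,    // re-derive on the spot via process_derived_oop
      _ignore       // skip derived pointers entirely
    };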
|
@ -271,7 +289,7 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
||||||
// The narrow_oop_base could be NULL or be the address
|
// The narrow_oop_base could be NULL or be the address
|
||||||
// of the page below heap depending on compressed oops mode.
|
// of the page below heap depending on compressed oops mode.
|
||||||
if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
|
if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
|
||||||
derived_oop_fn(base_loc, derived_loc);
|
derived_oop_fn(base_loc, derived_loc, oop_fn);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -296,20 +314,6 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
         // of the page below heap depending on compressed oops mode.
         continue;
       }
-#ifdef ASSERT
-      if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
-          !Universe::heap()->is_in_or_null(*loc)) {
-        tty->print_cr("# Found non oop pointer.  Dumping state at failure");
-        // try to dump out some helpful debugging information
-        trace_codeblob_maps(fr, reg_map);
-        omv.print();
-        tty->print_cr("register r");
-        omv.reg()->print();
-        tty->print_cr("loc = %p *loc = %p\n", loc, cast_from_oop<address>(*loc));
-        // do the real assert.
-        assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
-      }
-#endif // ASSERT
       oop_fn->do_oop(loc);
     } else if ( omv.type() == OopMapValue::narrowoop_value ) {
       narrowOop *nl = (narrowOop*)loc;
@@ -694,27 +698,26 @@ inline intptr_t value_of_loc(oop *pointer) {
 void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
   assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
   assert(derived_loc != base_loc, "Base and derived in same location");
-  if (_active) {
-    assert(*derived_loc != (void*)base_loc, "location already added");
-    assert(Entry::_list != NULL, "list must exist");
-    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
-    // This assert is invalid because derived pointers can be
-    // arbitrarily far away from their base.
-    // assert(offset >= -1000000, "wrong derived pointer info");
-
-    if (TraceDerivedPointers) {
-      tty->print_cr(
-        "Add derived pointer@" INTPTR_FORMAT
-        " - Derived: " INTPTR_FORMAT
-        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
-        p2i(derived_loc), p2i(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
-      );
-    }
-    // Set derived oop location to point to base.
-    *derived_loc = (oop)base_loc;
-    Entry* entry = new Entry(derived_loc, offset);
-    Entry::_list->push(*entry);
-  }
+  assert(*derived_loc != (void*)base_loc, "location already added");
+  assert(Entry::_list != NULL, "list must exist");
+  assert(is_active(), "table must be active here");
+  intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
+  // This assert is invalid because derived pointers can be
+  // arbitrarily far away from their base.
+  // assert(offset >= -1000000, "wrong derived pointer info");
+
+  if (TraceDerivedPointers) {
+    tty->print_cr(
+      "Add derived pointer@" INTPTR_FORMAT
+      " - Derived: " INTPTR_FORMAT
+      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
+      p2i(derived_loc), p2i(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
+    );
+  }
+
+  // Set derived oop location to point to base.
+  *derived_loc = (oop)base_loc;
+  Entry* entry = new Entry(derived_loc, offset);
+  Entry::_list->push(*entry);
 }
 
 void DerivedPointerTable::update_pointers() {
@@ -40,6 +40,7 @@
 //
 // OopMapValue describes a single OopMap entry
 
+enum class DerivedPointerIterationMode;
 class frame;
 class RegisterMap;
 class OopClosure;
@@ -196,7 +197,6 @@ class OopMap: public ResourceObj {
   bool equals(const OopMap* other) const;
 };
 
-
 class OopMapSet : public ResourceObj {
   friend class VMStructs;
  private:
@@ -221,13 +221,15 @@ class OopMapSet : public ResourceObj {
 
   // Iterates through frame for a compiled method
   static void oops_do            (const frame* fr,
-                                  const RegisterMap* reg_map, OopClosure* f);
+                                  const RegisterMap* reg_map,
+                                  OopClosure* f,
+                                  DerivedPointerIterationMode mode);
   static void update_register_map(const frame* fr, RegisterMap *reg_map);
 
   // Iterates through frame for a compiled method for dead ones and values, too
   static void all_do(const frame* fr, const RegisterMap* reg_map,
                      OopClosure* oop_fn,
-                     void derived_oop_fn(oop* base, oop* derived),
+                     void derived_oop_fn(oop* base, oop* derived, OopClosure* oop_fn),
                      OopClosure* value_fn);
 
   // Printing
@@ -370,6 +370,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {
     return 0;
   }
 
+  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
+  // incremental and cooperative. In order for that to work well, mechanisms that stop
+  // another thread might want to ensure its roots are in a sane state.
+  virtual bool uses_stack_watermark_barrier() const { return false; }
+
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
|
@ -114,6 +114,10 @@ uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
|
||||||
return during_relocate() ? relocate(addr) : mark<Follow, Strong, Publish>(addr);
|
return during_relocate() ? relocate(addr) : mark<Follow, Strong, Publish>(addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uintptr_t ZBarrier::relocate_or_mark_no_follow(uintptr_t addr) {
|
||||||
|
return during_relocate() ? relocate(addr) : mark<DontFollow, Strong, Publish>(addr);
|
||||||
|
}
|
||||||
|
|
||||||
uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
|
uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
|
||||||
return during_relocate() ? relocate(addr) : remap(addr);
|
return during_relocate() ? relocate(addr) : remap(addr);
|
||||||
}
|
}
|
||||||
|
@@ -125,6 +129,10 @@ uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
   return relocate_or_mark(addr);
 }
 
+uintptr_t ZBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) {
+  return relocate_or_mark_no_follow(addr);
+}
+
 void ZBarrier::load_barrier_on_oop_fields(oop o) {
   assert(ZAddress::is_good(ZOop::to_address(o)), "Should be good");
   ZLoadBarrierOopClosure cl;
@@ -198,14 +206,6 @@ uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
   return mark<Follow, Strong, Publish>(addr);
 }
 
-uintptr_t ZBarrier::mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) {
-  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  assert(during_mark(), "Invalid phase");
-
-  // Mark
-  return mark<DontFollow, Strong, Publish>(addr);
-}
-
 //
 // Relocate barrier
 //
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,9 +58,11 @@ private:
   static uintptr_t remap(uintptr_t addr);
   static uintptr_t relocate(uintptr_t addr);
   static uintptr_t relocate_or_mark(uintptr_t addr);
+  static uintptr_t relocate_or_mark_no_follow(uintptr_t addr);
   static uintptr_t relocate_or_remap(uintptr_t addr);
 
   static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
+  static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr);
 
   static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
   static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
@@ -72,7 +74,6 @@ private:
   static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
   static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
   static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
-  static uintptr_t mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr);
 
   static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
 
@@ -86,6 +87,7 @@ public:
   static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
   static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
   static void load_barrier_on_root_oop_field(oop* p);
+  static void load_barrier_on_invisible_root_oop_field(oop* p);
 
   // Weak load barrier
   static oop weak_load_barrier_on_oop_field(volatile oop* p);
@@ -277,6 +277,11 @@ inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
   root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
 }
 
+inline void ZBarrier::load_barrier_on_invisible_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, load_barrier_on_invisible_root_oop_slow_path>(p, o);
+}
+
 //
 // Weak load barrier
 //
@@ -407,11 +412,6 @@ inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 }
 
-inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
-  const oop o = *p;
-  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
-}
-
 //
 // Relocate barrier
 //
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zStackWatermark.hpp"
 #include "gc/z/zThreadLocalData.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
@@ -40,20 +41,11 @@
 class ZBarrierSetC1;
 class ZBarrierSetC2;
 
-static BarrierSetNMethod* make_barrier_set_nmethod() {
-  // NMethod barriers are only used when class unloading is enabled
-  if (!ClassUnloading) {
-    return NULL;
-  }
-
-  return new ZBarrierSetNMethod();
-}
-
 ZBarrierSet::ZBarrierSet() :
     BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
                make_barrier_set_c1<ZBarrierSetC1>(),
                make_barrier_set_c2<ZBarrierSetC2>(),
-               make_barrier_set_nmethod(),
+               new ZBarrierSetNMethod(),
               BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
 
 ZBarrierSetAssembler* ZBarrierSet::assembler() {
@@ -89,6 +81,11 @@ void ZBarrierSet::on_thread_destroy(Thread* thread) {
 void ZBarrierSet::on_thread_attach(Thread* thread) {
   // Set thread local address bad mask
   ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
+  if (thread->is_Java_thread()) {
+    JavaThread* const jt = thread->as_Java_thread();
+    StackWatermark* const watermark = new ZStackWatermark(jt);
+    StackWatermarkSet::add_watermark(jt, watermark);
+  }
 }
 
 void ZBarrierSet::on_thread_detach(Thread* thread) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,9 +62,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
 }
 
 int* ZBarrierSetNMethod::disarmed_value_address() const {
-  const uintptr_t mask_addr = reinterpret_cast<uintptr_t>(&ZAddressBadMask);
-  const uintptr_t disarmed_addr = mask_addr + ZNMethodDisarmedOffset;
-  return reinterpret_cast<int*>(disarmed_addr);
+  return (int*)ZAddressBadMaskHighOrderBitsAddr;
 }
 
 ByteSize ZBarrierSetNMethod::thread_disarmed_offset() const {
@@ -221,6 +221,10 @@ size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   return _heap.unsafe_max_tlab_alloc();
 }
 
+bool ZCollectedHeap::uses_stack_watermark_barrier() const {
+  return true;
+}
+
 GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
   return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
 }
@@ -88,6 +88,8 @@ public:
   virtual size_t max_tlab_size() const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
+  virtual bool uses_stack_watermark_barrier() const;
+
   virtual GrowableArray<GCMemoryManager*> memory_managers();
   virtual GrowableArray<MemoryPool*> memory_pools();
 
@@ -70,6 +70,10 @@ public:
     return false;
   }
 
+  virtual bool skip_thread_oop_barriers() const {
+    return true;
+  }
+
   virtual bool do_operation() = 0;
 
   virtual bool doit_prologue() {
@@ -218,6 +222,10 @@ public:
     return VMOp_ZVerify;
   }
 
+  virtual bool skip_thread_oop_barriers() const {
+    return true;
+  }
+
   virtual void doit() {
     ZVerify::after_weak_processing();
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,13 @@ uintptr_t ZAddressGoodMask;
 uintptr_t ZAddressBadMask;
 uintptr_t ZAddressWeakBadMask;
 
+static uint32_t* ZAddressCalculateBadMaskHighOrderBitsAddr() {
+  const uintptr_t addr = reinterpret_cast<uintptr_t>(&ZAddressBadMask);
+  return reinterpret_cast<uint32_t*>(addr + ZAddressBadMaskHighOrderBitsOffset);
+}
+
+uint32_t* ZAddressBadMaskHighOrderBitsAddr = ZAddressCalculateBadMaskHighOrderBitsAddr();
+
 size_t ZAddressOffsetBits;
 uintptr_t ZAddressOffsetMask;
 size_t ZAddressOffsetMax;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,6 +94,13 @@ extern uintptr_t ZAddressGoodMask;
 extern uintptr_t ZAddressBadMask;
 extern uintptr_t ZAddressWeakBadMask;
 
+// The bad mask is 64 bit. Its high order 32 bits contain all possible value combinations
+// that this mask will have. Therefore, the memory where the 32 high order bits are stored,
+// can be used as a 32 bit GC epoch counter, that has a different bit pattern every time
+// the bad mask is flipped. This provides a pointer to said 32 bits.
+extern uint32_t* ZAddressBadMaskHighOrderBitsAddr;
+const int ZAddressBadMaskHighOrderBitsOffset = LITTLE_ENDIAN_ONLY(4) BIG_ENDIAN_ONLY(0);
+
 // Pointer part of address
 extern size_t ZAddressOffsetBits;
 const size_t ZAddressOffsetShift = 0;
|
||||||
extern uintptr_t ZAddressMetadataRemapped;
|
extern uintptr_t ZAddressMetadataRemapped;
|
||||||
extern uintptr_t ZAddressMetadataFinalizable;
|
extern uintptr_t ZAddressMetadataFinalizable;
|
||||||
|
|
||||||
// NMethod entry barrier
|
|
||||||
const size_t ZNMethodDisarmedOffset = ZPlatformNMethodDisarmedOffset;
|
|
||||||
|
|
||||||
// Cache line size
|
// Cache line size
|
||||||
const size_t ZCacheLineSize = ZPlatformCacheLineSize;
|
const size_t ZCacheLineSize = ZPlatformCacheLineSize;
|
||||||
#define ZCACHE_ALIGNED ATTRIBUTE_ALIGNED(ZCacheLineSize)
|
#define ZCACHE_ALIGNED ATTRIBUTE_ALIGNED(ZCacheLineSize)
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
 #include "memory/iterator.inline.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
@@ -83,6 +84,11 @@ public:
   virtual void do_oop(narrowOop* p) {
     ShouldNotReachHere();
   }
+
+  virtual void do_thread(Thread* thread) {
+    CodeBlobToOopClosure code_cl(this, false /* fix_oop_relocations */);
+    thread->oops_do(this, &code_cl);
+  }
 };
 
 template <bool VisitReferents>
@@ -32,6 +32,7 @@
 #include "gc/z/zPage.hpp"
 #include "gc/z/zPageTable.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zStackWatermark.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zThread.inline.hpp"
@@ -46,6 +47,8 @@
 #include "runtime/handshake.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
+#include "runtime/stackWatermark.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -122,29 +125,6 @@ void ZMark::prepare_mark() {
 
 class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
 public:
-  ZMarkRootsIteratorClosure() {
-    ZThreadLocalAllocBuffer::reset_statistics();
-  }
-
-  ~ZMarkRootsIteratorClosure() {
-    ZThreadLocalAllocBuffer::publish_statistics();
-  }
-
-  virtual void do_thread(Thread* thread) {
-    // Update thread local address bad mask
-    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
-
-    // Mark invisible root
-    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);
-
-    // Retire TLAB
-    ZThreadLocalAllocBuffer::retire(thread);
-  }
-
-  virtual bool should_disarm_nmethods() const {
-    return true;
-  }
-
   virtual void do_oop(oop* p) {
     ZBarrier::mark_barrier_on_root_oop_field(p);
   }
@@ -631,6 +611,24 @@ void ZMark::work(uint64_t timeout_in_micros) {
 
 class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
 public:
+  ZMarkConcurrentRootsIteratorClosure() {
+    ZThreadLocalAllocBuffer::reset_statistics();
+  }
+
+  ~ZMarkConcurrentRootsIteratorClosure() {
+    ZThreadLocalAllocBuffer::publish_statistics();
+  }
+
+  virtual bool should_disarm_nmethods() const {
+    return true;
+  }
+
+  virtual void do_thread(Thread* thread) {
+    JavaThread* const jt = thread->as_Java_thread();
+    StackWatermarkSet::finish_processing(jt, this, StackWatermarkKind::gc);
+    ZThreadLocalAllocBuffer::update_stats(jt);
+  }
+
   virtual void do_oop(oop* p) {
     ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
   }
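do_thread now calls StackWatermarkSet::finish_processing, so either a GC worker or the mutator itself (via its return poll) may end up fixing a given set of frames; whoever claims them first does the work. The coordination pattern in miniature (hypothetical and greatly simplified, not HotSpot code):

    #include <atomic>

    // A chunk of frames gets processed exactly once, regardless of whether
    // the GC worker or the owning thread reaches it first.
    struct FrameChunk {
      std::atomic<bool> claimed{false};

      void finish_processing() {
        bool expected = false;
        if (claimed.compare_exchange_strong(expected, true)) {
          // ... apply load barriers to the oops in these frames ...
        }
      }
    };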
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,27 +189,17 @@ void ZNMethod::flush_nmethod(nmethod* nm) {
 
 bool ZNMethod::supports_entry_barrier(nmethod* nm) {
   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  if (bs != NULL) {
-    return bs->supports_entry_barrier(nm);
-  }
-
-  return false;
+  return bs->supports_entry_barrier(nm);
 }
 
 bool ZNMethod::is_armed(nmethod* nm) {
   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  if (bs != NULL) {
-    return bs->is_armed(nm);
-  }
-
-  return false;
+  return bs->is_armed(nm);
 }
 
 void ZNMethod::disarm(nmethod* nm) {
   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  if (bs != NULL) {
-    bs->disarm(nm);
-  }
+  bs->disarm(nm);
 }
 
 void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
|
||||||
|
|
||||||
class ZNMethodToOopsDoClosure : public NMethodClosure {
|
class ZNMethodToOopsDoClosure : public NMethodClosure {
|
||||||
private:
|
private:
|
||||||
OopClosure* _cl;
|
OopClosure* const _cl;
|
||||||
|
const bool _should_disarm_nmethods;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
ZNMethodToOopsDoClosure(OopClosure* cl) :
|
ZNMethodToOopsDoClosure(OopClosure* cl, bool should_disarm_nmethods) :
|
||||||
_cl(cl) {}
|
_cl(cl),
|
||||||
|
_should_disarm_nmethods(should_disarm_nmethods) {}
|
||||||
|
|
||||||
virtual void do_nmethod(nmethod* nm) {
|
virtual void do_nmethod(nmethod* nm) {
|
||||||
ZNMethod::nmethod_oops_do(nm, _cl);
|
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
|
||||||
|
if (!nm->is_alive()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (_should_disarm_nmethods) {
|
||||||
|
if (ZNMethod::is_armed(nm)) {
|
||||||
|
ZNMethod::nmethod_oops_do(nm, _cl);
|
||||||
|
ZNMethod::disarm(nm);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ZNMethod::nmethod_oops_do(nm, _cl);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -264,8 +268,8 @@ void ZNMethod::oops_do_end() {
   ZNMethodTable::nmethods_do_end();
 }
 
-void ZNMethod::oops_do(OopClosure* cl) {
-  ZNMethodToOopsDoClosure nmethod_cl(cl);
+void ZNMethod::oops_do(OopClosure* cl, bool should_disarm_nmethods) {
+  ZNMethodToOopsDoClosure nmethod_cl(cl, should_disarm_nmethods);
   ZNMethodTable::nmethods_do(&nmethod_cl);
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ public:
 
   static void oops_do_begin();
   static void oops_do_end();
-  static void oops_do(OopClosure* cl);
+  static void oops_do(OopClosure* cl, bool should_disarm_nmethods);
 
   static ZReentrantLock* lock_for_nmethod(nmethod* nm);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,21 +45,6 @@ ZRelocate::ZRelocate(ZWorkers* workers) :
 
 class ZRelocateRootsIteratorClosure : public ZRootsIteratorClosure {
 public:
-  virtual void do_thread(Thread* thread) {
-    // Update thread local address bad mask
-    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
-
-    // Relocate invisible root
-    ZThreadLocalData::do_invisible_root(thread, ZBarrier::relocate_barrier_on_root_oop_field);
-
-    // Remap TLAB
-    ZThreadLocalAllocBuffer::remap(thread);
-  }
-
-  virtual bool should_disarm_nmethods() const {
-    return true;
-  }
-
   virtual void do_oop(oop* p) {
     ZBarrier::relocate_barrier_on_root_oop_field(p);
   }
@@ -34,7 +34,9 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zNMethod.hpp"
+#include "gc/z/zNMethodTable.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
@@ -46,24 +48,22 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/stackWatermark.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/debug.hpp"
 
-static const ZStatSubPhase ZSubPhasePauseRootsSetup("Pause Roots Setup");
 static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
-static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
 static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
-static const ZStatSubPhase ZSubPhasePauseRootsVMThread("Pause Roots VM Thread");
-static const ZStatSubPhase ZSubPhasePauseRootsJavaThreads("Pause Roots Java Threads");
-static const ZStatSubPhase ZSubPhasePauseRootsCodeCache("Pause Roots CodeCache");
 
 static const ZStatSubPhase ZSubPhaseConcurrentRootsSetup("Concurrent Roots Setup");
 static const ZStatSubPhase ZSubPhaseConcurrentRoots("Concurrent Roots");
 static const ZStatSubPhase ZSubPhaseConcurrentRootsTeardown("Concurrent Roots Teardown");
 static const ZStatSubPhase ZSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet");
 static const ZStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph");
+static const ZStatSubPhase ZSubPhaseConcurrentRootsJavaThreads("Concurrent Roots Java Threads");
+static const ZStatSubPhase ZSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache");
 
 static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup");
 static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots");
@@ -127,45 +127,6 @@ void ZParallelWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, ZRoots
   }
 }
 
-class ZRootsIteratorCodeBlobClosure : public CodeBlobClosure {
-private:
-  ZRootsIteratorClosure* const _cl;
-  const bool _should_disarm_nmethods;
-
-public:
-  ZRootsIteratorCodeBlobClosure(ZRootsIteratorClosure* cl) :
-      _cl(cl),
-      _should_disarm_nmethods(cl->should_disarm_nmethods()) {}
-
-  virtual void do_code_blob(CodeBlob* cb) {
-    nmethod* const nm = cb->as_nmethod_or_null();
-    if (nm != NULL && nm->oops_do_try_claim()) {
-      ZNMethod::nmethod_oops_do(nm, _cl);
-      assert(!ZNMethod::supports_entry_barrier(nm) ||
-             ZNMethod::is_armed(nm) == _should_disarm_nmethods, "Invalid state");
-      if (_should_disarm_nmethods) {
-        ZNMethod::disarm(nm);
-      }
-    }
-  }
-};
-
-class ZRootsIteratorThreadClosure : public ThreadClosure {
-private:
-  ZRootsIteratorClosure* const _cl;
-  ResourceMark _rm;
-
-public:
-  ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) :
-      _cl(cl) {}
-
-  virtual void do_thread(Thread* thread) {
-    ZRootsIteratorCodeBlobClosure code_cl(_cl);
-    thread->oops_do(_cl, ClassUnloading ? &code_cl : NULL);
-    _cl->do_thread(thread);
-  }
-};
-
 ZJavaThreadsIterator::ZJavaThreadsIterator() :
     _threads(),
     _claimed(0) {}
@@ -182,31 +143,8 @@ void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {
 
 ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
     _visit_jvmti_weak_export(visit_jvmti_weak_export),
-    _java_threads_iter(),
-    _jvmti_weak_export(this),
-    _vm_thread(this),
-    _java_threads(this),
-    _code_cache(this) {
+    _jvmti_weak_export(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  ZStatTimer timer(ZSubPhasePauseRootsSetup);
-  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
-  if (ClassUnloading) {
-    nmethod::oops_do_marking_prologue();
-  } else {
-    ZNMethod::oops_do_begin();
-  }
-}
-
-ZRootsIterator::~ZRootsIterator() {
-  ZStatTimer timer(ZSubPhasePauseRootsTeardown);
-  ResourceMark rm;
-  if (ClassUnloading) {
-    nmethod::oops_do_marking_epilogue();
-  } else {
-    ZNMethod::oops_do_end();
-  }
-
-  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
 }
 
 void ZRootsIterator::do_jvmti_weak_export(ZRootsIteratorClosure* cl) {
|
||||||
JvmtiExport::weak_oops_do(&always_alive, cl);
|
JvmtiExport::weak_oops_do(&always_alive, cl);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ZRootsIterator::do_vm_thread(ZRootsIteratorClosure* cl) {
|
|
||||||
ZStatTimer timer(ZSubPhasePauseRootsVMThread);
|
|
||||||
ZRootsIteratorThreadClosure thread_cl(cl);
|
|
||||||
thread_cl.do_thread(VMThread::vm_thread());
|
|
||||||
}
|
|
||||||
|
|
||||||
void ZRootsIterator::do_java_threads(ZRootsIteratorClosure* cl) {
|
|
||||||
ZStatTimer timer(ZSubPhasePauseRootsJavaThreads);
|
|
||||||
ZRootsIteratorThreadClosure thread_cl(cl);
|
|
||||||
_java_threads_iter.threads_do(&thread_cl);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ZRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {
|
|
||||||
ZStatTimer timer(ZSubPhasePauseRootsCodeCache);
|
|
||||||
ZNMethod::oops_do(cl);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
|
void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
|
||||||
ZStatTimer timer(ZSubPhasePauseRoots);
|
ZStatTimer timer(ZSubPhasePauseRoots);
|
||||||
_vm_thread.oops_do(cl);
|
|
||||||
_java_threads.oops_do(cl);
|
|
||||||
if (!ClassUnloading) {
|
|
||||||
_code_cache.oops_do(cl);
|
|
||||||
}
|
|
||||||
if (_visit_jvmti_weak_export) {
|
if (_visit_jvmti_weak_export) {
|
||||||
_jvmti_weak_export.oops_do(cl);
|
_jvmti_weak_export.oops_do(cl);
|
||||||
}
|
}
|
||||||
|
@ -246,15 +162,24 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
 
 ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) :
     _oop_storage_set_iter(),
+    _java_threads_iter(),
     _cld_claim(cld_claim),
     _oop_storage_set(this),
-    _class_loader_data_graph(this) {
+    _class_loader_data_graph(this),
+    _java_threads(this),
+    _code_cache(this) {
   ZStatTimer timer(ZSubPhaseConcurrentRootsSetup);
   ClassLoaderDataGraph::clear_claimed_marks(cld_claim);
+  if (!ClassUnloading) {
+    ZNMethodTable::nmethods_do_begin();
+  }
 }
 
 ZConcurrentRootsIterator::~ZConcurrentRootsIterator() {
   ZStatTimer timer(ZSubPhaseConcurrentRootsTeardown);
+  if (!ClassUnloading) {
+    ZNMethodTable::nmethods_do_end();
+  }
 }
 
 void ZConcurrentRootsIterator::do_oop_storage_set(ZRootsIteratorClosure* cl) {
@ -268,10 +193,41 @@ void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
   ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
 }
 
+class ZConcurrentRootsIteratorThreadClosure : public ThreadClosure {
+private:
+  ZRootsIteratorClosure* const _cl;
+  // The resource mark is needed because interpreter oop maps are not reused in concurrent mode.
+  // Instead, they are temporary and resource allocated.
+  ResourceMark _rm;
+
+public:
+  ZConcurrentRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) :
+      _cl(cl) {}
+
+  virtual void do_thread(Thread* thread) {
+    _cl->do_thread(thread);
+  }
+};
+
+void ZConcurrentRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsCodeCache);
+  ZNMethod::oops_do(cl, cl->should_disarm_nmethods());
+}
+
+void ZConcurrentRootsIterator::do_java_threads(ZRootsIteratorClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsJavaThreads);
+  ZConcurrentRootsIteratorThreadClosure thread_cl(cl);
+  _java_threads_iter.threads_do(&thread_cl);
+}
+
 void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhaseConcurrentRoots);
   _oop_storage_set.oops_do(cl);
   _class_loader_data_graph.oops_do(cl);
+  _java_threads.oops_do(cl);
+  if (!ClassUnloading) {
+    _code_cache.oops_do(cl);
+  }
 }
 
 ZWeakRootsIterator::ZWeakRootsIterator() :
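The ZJavaThreadsIterator used above hands out threads to GC workers through a _claimed counter. A standalone model of that claim protocol follows; names are illustrative and this is not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <atomic>
#include <cstdio>
#include <vector>

// Workers repeatedly claim the next unvisited index; one fetch_add hands
// each element to exactly one worker, with no other synchronization.
class ClaimingIterator {
 public:
  explicit ClaimingIterator(std::vector<int>* items) : _items(items), _claimed(0) {}

  // Returns false once every item has been claimed by some worker.
  bool next(int* out) {
    const size_t i = _claimed.fetch_add(1, std::memory_order_relaxed);
    if (i >= _items->size()) {
      return false;
    }
    *out = (*_items)[i];
    return true;
  }

 private:
  std::vector<int>* _items;
  std::atomic<size_t> _claimed;
};

int main() {
  std::vector<int> threads_to_scan{10, 11, 12};
  ClaimingIterator iter(&threads_to_scan);
  for (int t; iter.next(&t); ) {
    std::printf("worker scans thread %d\n", t);
  }
}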
@ -107,22 +107,14 @@ public:
 
 class ZRootsIterator {
 private:
   const bool _visit_jvmti_weak_export;
-  ZJavaThreadsIterator _java_threads_iter;
 
   void do_jvmti_weak_export(ZRootsIteratorClosure* cl);
-  void do_vm_thread(ZRootsIteratorClosure* cl);
-  void do_java_threads(ZRootsIteratorClosure* cl);
-  void do_code_cache(ZRootsIteratorClosure* cl);
 
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
-  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_vm_thread> _vm_thread;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_java_threads> _java_threads;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache> _code_cache;
 
 public:
   ZRootsIterator(bool visit_jvmti_weak_export = false);
-  ~ZRootsIterator();
 
   void oops_do(ZRootsIteratorClosure* cl);
 };
 
@ -130,13 +122,18 @@ public:
 class ZConcurrentRootsIterator {
 private:
   ZOopStorageSetStrongIterator _oop_storage_set_iter;
+  ZJavaThreadsIterator         _java_threads_iter;
   const int                    _cld_claim;
 
   void do_oop_storage_set(ZRootsIteratorClosure* cl);
+  void do_java_threads(ZRootsIteratorClosure* cl);
   void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
+  void do_code_cache(ZRootsIteratorClosure* cl);
 
   ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_oop_storage_set> _oop_storage_set;
   ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
+  ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_java_threads> _java_threads;
+  ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_code_cache> _code_cache;
 
 public:
   ZConcurrentRootsIterator(int cld_claim);
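The ZSerialOopsDo/ZParallelOopsDo members above bind each root category to its handler at compile time through a member-function-pointer template parameter. A minimal standalone sketch of that C++ pattern, with invented names:

// --- illustrative sketch, not part of this patch ---
#include <cstdio>

// Dispatcher parameterized on a member function pointer: each instance
// forwards do_it() to one specific method of its owner, resolved at
// compile time, so no virtual call or switch is needed.
template <typename T, void (T::*F)(int)>
class SerialDo {
 public:
  explicit SerialDo(T* owner) : _owner(owner) {}
  void do_it(int arg) { (_owner->*F)(arg); }

 private:
  T* const _owner;
};

class Roots {
 private:
  void do_threads(int arg) { std::printf("threads, arg=%d\n", arg); }
  void do_code(int arg)    { std::printf("code cache, arg=%d\n", arg); }

  SerialDo<Roots, &Roots::do_threads> _threads;
  SerialDo<Roots, &Roots::do_code>    _code;

 public:
  Roots() : _threads(this), _code(this) {}
  void all(int arg) { _threads.do_it(arg); _code.do_it(arg); }
};

int main() {
  Roots r;
  r.all(42);
}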
src/hotspot/share/gc/z/zStackWatermark.cpp (new file, 100 lines)
@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zStackWatermark.hpp"
+#include "gc/z/zThread.inline.hpp"
+#include "gc/z/zThreadLocalAllocBuffer.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zVerify.hpp"
+#include "memory/resourceArea.inline.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
+
+ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure() :
+    _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
+
+void ZOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
+  nmethod* const nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    const bool result = _bs_nm->nmethod_entry_barrier(nm);
+    assert(result, "NMethod on-stack must be alive");
+  }
+}
+
+ThreadLocalAllocStats& ZStackWatermark::stats() {
+  return _stats;
+}
+
+uint32_t ZStackWatermark::epoch_id() const {
+  return *ZAddressBadMaskHighOrderBitsAddr;
+}
+
+ZStackWatermark::ZStackWatermark(JavaThread* jt) :
+    StackWatermark(jt, StackWatermarkKind::gc, *ZAddressBadMaskHighOrderBitsAddr),
+    _jt_cl(),
+    _cb_cl(),
+    _stats() {}
+
+OopClosure* ZStackWatermark::closure_from_context(void* context) {
+  if (context != NULL) {
+    assert(ZThread::is_worker(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
+    return reinterpret_cast<OopClosure*>(context);
+  } else {
+    return &_jt_cl;
+  }
+}
+
+void ZStackWatermark::start_processing_impl(void* context) {
+  // Verify the head (no_frames) of the thread is bad before fixing it.
+  ZVerify::verify_thread_head_bad(_jt);
+
+  // Process the non-frame part of the thread
+  _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
+  ZThreadLocalData::do_invisible_root(_jt, ZBarrier::load_barrier_on_invisible_root_oop_field);
+
+  // Verification of frames is done after processing of the "head" (no_frames).
+  // The reason is that the exception oop is fiddled with during frame processing.
+  ZVerify::verify_thread_frames_bad(_jt);
+
+  // Update thread local address bad mask
+  ZThreadLocalData::set_address_bad_mask(_jt, ZAddressBadMask);
+
+  // Retire TLAB
+  if (ZGlobalPhase == ZPhaseMark) {
+    ZThreadLocalAllocBuffer::retire(_jt, &_stats);
+  } else {
+    ZThreadLocalAllocBuffer::remap(_jt);
+  }
+
+  // Publishes the processing start to concurrent threads
+  StackWatermark::start_processing_impl(context);
+}
+
+void ZStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
+  ZVerify::verify_frame_bad(fr, register_map);
+  fr.oops_do(closure_from_context(context), &_cb_cl, &register_map, DerivedPointerIterationMode::_directly);
+}
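The watermark tracks how far up the stack processing has progressed, so a return that stays below it is fast and a return that crosses it must process frames first. A standalone model of that comparison, assuming a downward-growing stack; names are illustrative, not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <cstdint>
#include <cstdio>

// The watermark records the boundary of processed frames: frames with
// sp below it (newer frames) have already been fixed up.
struct Watermark {
  uintptr_t processed_up_to;
};

// Returning into a caller frame is only safe if that frame has been
// processed, i.e. its sp still lies below the watermark.
bool needs_processing(const Watermark& wm, uintptr_t caller_sp) {
  return caller_sp >= wm.processed_up_to;
}

int main() {
  Watermark wm{0x7000};
  std::printf("return to sp=0x6f00: slow path? %d\n", needs_processing(wm, 0x6f00));  // 0
  std::printf("return to sp=0x7100: slow path? %d\n", needs_processing(wm, 0x7100));  // 1
}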
src/hotspot/share/gc/z/zStackWatermark.hpp (new file, 67 lines)
@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZSTACKWATERMARK_HPP
+#define SHARE_GC_Z_ZSTACKWATERMARK_HPP
+
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/z/zOopClosures.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "runtime/stackWatermark.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class frame;
+class JavaThread;
+
+class ZOnStackCodeBlobClosure : public CodeBlobClosure {
+private:
+  BarrierSetNMethod* _bs_nm;
+
+  virtual void do_code_blob(CodeBlob* cb);
+
+public:
+  ZOnStackCodeBlobClosure();
+};
+
+class ZStackWatermark : public StackWatermark {
+private:
+  ZLoadBarrierOopClosure  _jt_cl;
+  ZOnStackCodeBlobClosure _cb_cl;
+  ThreadLocalAllocStats   _stats;
+
+  OopClosure* closure_from_context(void* context);
+
+  virtual uint32_t epoch_id() const;
+  virtual void start_processing_impl(void* context);
+  virtual void process(const frame& fr, RegisterMap& register_map, void* context);
+
+public:
+  ZStackWatermark(JavaThread* jt);
+
+  ThreadLocalAllocStats& stats();
+};
+
+#endif // SHARE_GC_Z_ZSTACKWATERMARK_HPP
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,11 @@
 
 #include "precompiled.hpp"
 #include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zStackWatermark.hpp"
 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zValue.inline.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
 #include "runtime/thread.hpp"
 
 ZPerWorker<ThreadLocalAllocStats>* ZThreadLocalAllocBuffer::_stats = NULL;
@ -64,9 +66,9 @@ static void fixup_address(HeapWord** p) {
   *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
 }
 
-void ZThreadLocalAllocBuffer::retire(Thread* thread) {
-  if (UseTLAB && thread->is_Java_thread()) {
-    ThreadLocalAllocStats* const stats = _stats->addr();
+void ZThreadLocalAllocBuffer::retire(JavaThread* thread, ThreadLocalAllocStats* stats) {
+  if (UseTLAB) {
+    stats->reset();
     thread->tlab().addresses_do(fixup_address);
     thread->tlab().retire(stats);
     if (ResizeTLAB) {
@ -75,8 +77,15 @@ void ZThreadLocalAllocBuffer::retire(Thread* thread) {
   }
 }
 
-void ZThreadLocalAllocBuffer::remap(Thread* thread) {
-  if (UseTLAB && thread->is_Java_thread()) {
+void ZThreadLocalAllocBuffer::remap(JavaThread* thread) {
+  if (UseTLAB) {
     thread->tlab().addresses_do(fixup_address);
   }
 }
+
+void ZThreadLocalAllocBuffer::update_stats(JavaThread* thread) {
+  if (UseTLAB) {
+    ZStackWatermark* const watermark = StackWatermarkSet::get<ZStackWatermark>(thread, StackWatermarkKind::gc);
+    _stats->addr()->update(watermark->stats());
+  }
+}
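With retire() now filling a caller-supplied stats object and update_stats() folding it into the global aggregate later, per-thread stats stay private until one merge step. A standalone model of that accumulate-then-merge pattern; names are illustrative, not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <cstdio>

// Stats are gathered privately per thread and folded into a global
// aggregate once, instead of contending on shared counters.
struct AllocStats {
  long refills = 0;
  long bytes = 0;

  void reset() { refills = 0; bytes = 0; }

  void update(const AllocStats& other) {
    refills += other.refills;
    bytes += other.bytes;
  }
};

int main() {
  AllocStats global;
  AllocStats per_thread;          // filled while the thread's TLAB is retired
  per_thread.refills = 3;
  per_thread.bytes = 64 * 1024;
  global.update(per_thread);      // published later, in one merge step
  std::printf("refills=%ld bytes=%ld\n", global.refills, global.bytes);
}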
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
 #include "gc/z/zValue.hpp"
 #include "memory/allocation.hpp"
 
+class JavaThread;
+
 class ZThreadLocalAllocBuffer : public AllStatic {
 private:
   static ZPerWorker<ThreadLocalAllocStats>* _stats;
@ -38,8 +40,9 @@ public:
   static void reset_statistics();
   static void publish_statistics();
 
-  static void retire(Thread* thread);
-  static void remap(Thread* thread);
+  static void retire(JavaThread* thread, ThreadLocalAllocStats* stats);
+  static void remap(JavaThread* thread);
+  static void update_stats(JavaThread* thread);
 };
 
 #endif // SHARE_GC_Z_ZTHREADLOCALALLOCBUFFER_HPP
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ public:
   }
 
   static ByteSize nmethod_disarmed_offset() {
-    return address_bad_mask_offset() + in_ByteSize(ZNMethodDisarmedOffset);
+    return address_bad_mask_offset() + in_ByteSize(ZAddressBadMaskHighOrderBitsOffset);
  }
 };
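Pointing the disarm value at the high-order bits of the address bad mask makes the mask's epoch double as the nmethod arming state: an nmethod is armed exactly when its stored epoch is stale. A standalone model of that reading of the change; names are illustrative, not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <cstdint>
#include <cstdio>

// The GC flips the bad mask each phase; its high-order bits act as an
// epoch. A guard whose stored epoch matches is disarmed (fast entry);
// a stale epoch means the entry barrier must run.
static uint32_t current_epoch = 1;

struct NMethodGuard {
  uint32_t disarmed_epoch = 0;

  bool is_armed() const { return disarmed_epoch != current_epoch; }
  void disarm() { disarmed_epoch = current_epoch; }
};

int main() {
  NMethodGuard nm;
  std::printf("armed initially: %d\n", nm.is_armed());       // 1
  nm.disarm();
  std::printf("armed after disarm: %d\n", nm.is_armed());    // 0
  current_epoch++;  // a GC phase flip implicitly re-arms every guard
  std::printf("armed after epoch flip: %d\n", nm.is_armed()); // 1
}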
@ -29,10 +29,16 @@
 #include "gc/z/zPageAllocator.hpp"
 #include "gc/z/zResurrection.hpp"
 #include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zStackWatermark.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zVerify.hpp"
 #include "memory/iterator.inline.hpp"
+#include "memory/resourceArea.inline.hpp"
 #include "oops/oop.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/stackWatermark.inline.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
+#include "utilities/preserveException.hpp"
 
 #define BAD_OOP_ARG(o, p)   "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p)
 
@ -55,16 +61,127 @@ static void z_verify_possibly_weak_oop(oop* p) {
 }
 
 class ZVerifyRootClosure : public ZRootsIteratorClosure {
+private:
+  const bool _verify_fixed;
+
 public:
+  ZVerifyRootClosure(bool verify_fixed) :
+      _verify_fixed(verify_fixed) {}
+
   virtual void do_oop(oop* p) {
-    z_verify_oop(p);
+    if (_verify_fixed) {
+      z_verify_oop(p);
+    } else {
+      // Don't know the state of the oop.
+      oop obj = *p;
+      obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(&obj);
+      z_verify_oop(&obj);
+    }
   }
 
   virtual void do_oop(narrowOop*) {
     ShouldNotReachHere();
   }
+
+  virtual void do_thread(Thread* thread);
+
+  bool verify_fixed() const {
+    return _verify_fixed;
+  }
 };
+
+class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
+public:
+  ZVerifyCodeBlobClosure(ZVerifyRootClosure* _cl) :
+      CodeBlobToOopClosure(_cl, false /* fix_relocations */) {}
+
+  virtual void do_code_blob(CodeBlob* cb) {
+    CodeBlobToOopClosure::do_code_blob(cb);
+  }
+};
+
+class ZVerifyStack : public OopClosure {
+private:
+  ZVerifyRootClosure* const _cl;
+  JavaThread*         const _jt;
+  uint64_t                  _last_good;
+  bool                      _verifying_bad_frames;
+
+public:
+  ZVerifyStack(ZVerifyRootClosure* cl, JavaThread* jt) :
+      _cl(cl),
+      _jt(jt),
+      _last_good(0),
+      _verifying_bad_frames(false) {
+    ZStackWatermark* const stack_watermark = StackWatermarkSet::get<ZStackWatermark>(jt, StackWatermarkKind::gc);
+
+    if (_cl->verify_fixed()) {
+      assert(stack_watermark->processing_started(), "Should already have been fixed");
+      assert(stack_watermark->processing_completed(), "Should already have been fixed");
+    } else {
+      // We don't really know the state of the stack, verify watermark.
+      if (!stack_watermark->processing_started()) {
+        _verifying_bad_frames = true;
+      } else {
+        // Not time yet to verify bad frames
+        _last_good = stack_watermark->last_processed();
+      }
+    }
+  }
+
+  void do_oop(oop* p) {
+    if (_verifying_bad_frames) {
+      const oop obj = *p;
+      guarantee(!ZAddress::is_good(ZOop::to_address(obj)), BAD_OOP_ARG(obj, p));
+    }
+    _cl->do_oop(p);
+  }
+
+  void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+
+  void prepare_next_frame(frame& frame) {
+    if (_cl->verify_fixed()) {
+      // All frames need to be good
+      return;
+    }
+
+    // The verification has two modes, depending on whether we have reached the
+    // last processed frame or not. Before it is reached, we expect everything to
+    // be good. After reaching it, we expect everything to be bad.
+    const uintptr_t sp = reinterpret_cast<uintptr_t>(frame.sp());
+
+    if (!_verifying_bad_frames && sp == _last_good) {
+      // Found the last good frame, now verify the bad ones
+      _verifying_bad_frames = true;
+    }
+  }
+
+  void verify_frames() {
+    ZVerifyCodeBlobClosure cb_cl(_cl);
+    for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */);
+         !frames.is_done();
+         frames.next()) {
+      frame& frame = *frames.current();
+      frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore);
+      prepare_next_frame(frame);
+    }
+  }
+};
+
+void ZVerifyRootClosure::do_thread(Thread* thread) {
+  thread->oops_do_no_frames(this, NULL);
+
+  JavaThread* const jt = thread->as_Java_thread();
+  if (!jt->has_last_Java_frame()) {
+    return;
+  }
+
+  ZVerifyStack verify_stack(this, jt);
+  verify_stack.verify_frames();
+}
 
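The ZVerifyStack walk above flips from "expect good" to "expect bad" once it passes the last processed frame. A standalone model of that boundary walk; names are illustrative, not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <cassert>
#include <cstdint>
#include <vector>

struct Frame {
  uintptr_t sp;
  bool good;  // whether its oops have already been fixed
};

// Walk from newest to oldest frame: every frame up to and including the
// last processed one must be good, everything beyond it must still be bad.
void verify(const std::vector<Frame>& stack, uintptr_t last_good_sp) {
  bool verifying_bad = false;
  for (const Frame& f : stack) {
    if (verifying_bad) {
      assert(!f.good && "frame past the watermark should not be processed yet");
    } else {
      assert(f.good && "frame inside the watermark should already be processed");
    }
    if (!verifying_bad && f.sp == last_good_sp) {
      verifying_bad = true;  // boundary reached; expectations flip
    }
  }
}

int main() {
  std::vector<Frame> stack = {{0x100, true}, {0x200, true}, {0x300, false}};
  verify(stack, 0x200);
}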
 class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure, public ZRootsIteratorClosure {
 private:
   const bool _verify_weaks;
@ -101,36 +218,36 @@ public:
 };
 
 template <typename RootsIterator>
-void ZVerify::roots() {
+void ZVerify::roots(bool verify_fixed) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
   assert(!ZResurrection::is_blocked(), "Invalid phase");
 
   if (ZVerifyRoots) {
-    ZVerifyRootClosure cl;
+    ZVerifyRootClosure cl(verify_fixed);
     RootsIterator iter;
     iter.oops_do(&cl);
   }
 }
 
 void ZVerify::roots_strong() {
-  roots<ZRootsIterator>();
+  roots<ZRootsIterator>(true /* verify_fixed */);
 }
 
 void ZVerify::roots_weak() {
-  roots<ZWeakRootsIterator>();
+  roots<ZWeakRootsIterator>(true /* verify_fixed */);
 }
 
-void ZVerify::roots_concurrent_strong() {
-  roots<ZConcurrentRootsIteratorClaimNone>();
+void ZVerify::roots_concurrent_strong(bool verify_fixed) {
+  roots<ZConcurrentRootsIteratorClaimNone>(verify_fixed);
 }
 
 void ZVerify::roots_concurrent_weak() {
-  roots<ZConcurrentWeakRootsIterator>();
+  roots<ZConcurrentWeakRootsIterator>(true /* verify_fixed */);
 }
 
-void ZVerify::roots(bool verify_weaks) {
+void ZVerify::roots(bool verify_concurrent_strong, bool verify_weaks) {
   roots_strong();
-  roots_concurrent_strong();
+  roots_concurrent_strong(verify_concurrent_strong);
   if (verify_weaks) {
     roots_weak();
     roots_concurrent_weak();
@ -149,27 +266,27 @@ void ZVerify::objects(bool verify_weaks) {
   }
 }
 
-void ZVerify::roots_and_objects(bool verify_weaks) {
-  roots(verify_weaks);
+void ZVerify::roots_and_objects(bool verify_concurrent_strong, bool verify_weaks) {
+  roots(verify_concurrent_strong, verify_weaks);
   objects(verify_weaks);
 }
 
 void ZVerify::before_zoperation() {
   // Verify strong roots
   ZStatTimerDisable disable;
-  roots_strong();
+  roots(false /* verify_concurrent_strong */, false /* verify_weaks */);
 }
 
 void ZVerify::after_mark() {
   // Verify all strong roots and strong references
   ZStatTimerDisable disable;
-  roots_and_objects(false /* verify_weaks */);
+  roots_and_objects(true /* verify_concurrent_strong */, false /* verify_weaks */);
 }
 
 void ZVerify::after_weak_processing() {
   // Verify all roots and all references
   ZStatTimerDisable disable;
-  roots_and_objects(true /* verify_weaks */);
+  roots_and_objects(true /* verify_concurrent_strong */, true /* verify_weaks */);
 }
 
 template <bool Map>
@ -206,3 +323,59 @@ ZVerifyViewsFlip::~ZVerifyViewsFlip() {
     ZHeap::heap()->pages_do(&cl);
   }
 }
+
+#ifdef ASSERT
+
+class ZVerifyBadOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    const oop o = *p;
+    assert(!ZAddress::is_good(ZOop::to_address(o)), "Should not be good: " PTR_FORMAT, p2i(o));
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+// This class encapsulates various marks we need to deal with calling the
+// frame iteration code from arbitrary points in the runtime. It is mostly
+// due to problems that we might want to eventually clean up inside of the
+// frame iteration code, such as creating random handles even though there
+// is no safepoint to protect against, and fiddling around with exceptions.
+class StackWatermarkProcessingMark {
+  ResetNoHandleMark     _rnhm;
+  HandleMark            _hm;
+  PreserveExceptionMark _pem;
+  ResourceMark          _rm;
+
+public:
+  StackWatermarkProcessingMark(Thread* thread) :
+      _rnhm(),
+      _hm(thread),
+      _pem(thread),
+      _rm(thread) { }
+};
+
+void ZVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) {
+  ZVerifyBadOopClosure verify_cl;
+  fr.oops_do(&verify_cl, NULL, &register_map, DerivedPointerIterationMode::_ignore);
+}
+
+void ZVerify::verify_thread_head_bad(JavaThread* jt) {
+  ZVerifyBadOopClosure verify_cl;
+  jt->oops_do_no_frames(&verify_cl, NULL);
+}
+
+void ZVerify::verify_thread_frames_bad(JavaThread* jt) {
+  if (jt->has_last_Java_frame()) {
+    ZVerifyBadOopClosure verify_cl;
+    StackWatermarkProcessingMark swpm(Thread::current());
+    // Traverse the execution stack
+    for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
+      fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore);
+    }
+  }
+}
+
+#endif // ASSERT
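StackWatermarkProcessingMark bundles several scoped guards into one RAII object so every caller of the frame iteration code arms them in the same order. A standalone model of the pattern; guard names are invented for illustration.

// --- illustrative sketch, not part of this patch ---
#include <cstdio>

// Individual scoped guards; construction follows declaration order and
// destruction runs in reverse, which bundling makes impossible to get wrong.
struct NoHandleGuard {
  NoHandleGuard()  { std::printf("handles disallowed\n"); }
  ~NoHandleGuard() { std::printf("handles allowed again\n"); }
};

struct ExceptionGuard {
  ExceptionGuard()  { std::printf("pending exception stashed\n"); }
  ~ExceptionGuard() { std::printf("pending exception restored\n"); }
};

class ProcessingMark {
  NoHandleGuard  _nh;  // armed first
  ExceptionGuard _ex;  // armed second
};

void process_frames() {
  ProcessingMark mark;  // one declaration arms every guard consistently
  std::printf("...iterating frames...\n");
}

int main() {
  process_frames();
}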
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -26,25 +26,30 @@
 
 #include "memory/allocation.hpp"
 
+class frame;
 class ZPageAllocator;
 
 class ZVerify : public AllStatic {
 private:
-  template <typename RootsIterator> static void roots();
+  template <typename RootsIterator> static void roots(bool verify_fixed);
 
   static void roots_strong();
   static void roots_weak();
-  static void roots_concurrent_strong();
+  static void roots_concurrent_strong(bool verify_fixed);
   static void roots_concurrent_weak();
 
-  static void roots(bool verify_weaks);
+  static void roots(bool verify_concurrent_strong, bool verify_weaks);
   static void objects(bool verify_weaks);
-  static void roots_and_objects(bool verify_weaks);
+  static void roots_and_objects(bool verify_concurrent_strong, bool verify_weaks);
 
 public:
   static void before_zoperation();
   static void after_mark();
   static void after_weak_processing();
+
+  static void verify_thread_head_bad(JavaThread* thread) NOT_DEBUG_RETURN;
+  static void verify_thread_frames_bad(JavaThread* thread) NOT_DEBUG_RETURN;
+  static void verify_frame_bad(const frame& fr, RegisterMap& register_map) NOT_DEBUG_RETURN;
 };
 
 class ZVerifyViewsFlip {
@ -64,6 +64,7 @@
 #include "runtime/jfieldIDWorkaround.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/threadCritical.hpp"
@ -447,6 +448,10 @@ JRT_END
 // from a call, the expression stack contains the values for the bci at the
 // invoke w/o arguments (i.e., as if one were inside the call).
 JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))
+  // We get here after we have unwound from a callee throwing an exception
+  // into the interpreter. Any deferred stack processing is notified of
+  // the event via the StackWatermarkSet.
+  StackWatermarkSet::after_unwind(thread);
 
   LastFrameAccessor last_frame(thread);
   Handle h_exception(thread, exception);
@ -1153,6 +1158,11 @@ JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
   // if this is called during a safepoint
 
   if (JvmtiExport::should_post_single_step()) {
+    // This function is called by the interpreter when single stepping. Such single
+    // stepping could unwind a frame. Then, it is important that we process any frames
+    // that we might return into.
+    StackWatermarkSet::before_unwind(thread);
+
     // We are called during regular safepoints and when the VM is
     // single stepping. If any thread is marked for single stepping,
     // then we may have JVMTI work to do.
@ -1161,6 +1171,20 @@ JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
   }
 JRT_END
 
+JRT_ENTRY(void, InterpreterRuntime::at_unwind(JavaThread* thread))
+  // JRT_END does an implicit safepoint check, hence we are guaranteed to block
+  // if this is called during a safepoint
+
+  // This function is called by the interpreter when the return poll found a reason
+  // to call the VM. The reason could be that we are returning into a not yet safe
+  // to access frame. We handle that below.
+  // Note that this path does not check for single stepping, because we do not want
+  // to single step when unwinding frames for an exception being thrown. Instead,
+  // such single stepping code will use the safepoint table, which will use the
+  // InterpreterRuntime::at_safepoint callback.
+  StackWatermarkSet::before_unwind(thread);
+JRT_END
+
 JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                       ConstantPoolCacheEntry *cp_entry))
 
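As the comments above describe, any path that pops a frame must make the destination safe first, and a path that has already landed after an unwind must fix up afterwards. A standalone model of the two hook placements, inferred from those comments; names are illustrative, not the HotSpot API.

// --- illustrative sketch, not part of this patch ---
#include <cstdio>

// before_unwind: called while the frame is still being popped, so the
// caller frame is processed before control can touch it.
void before_unwind() { std::printf("ensure caller frame is processed\n"); }

// after_unwind: called once control has already landed in a new frame,
// e.g. after exception delivery chose a handler frame.
void after_unwind() { std::printf("fix up the frame we landed in\n"); }

void return_from_interpreter() {
  before_unwind();  // the return poll noticed the watermark; process first
  // ...pop frame, return...
}

void deliver_exception() {
  // ...unwind to the handler...
  after_unwind();   // the handler frame is made safe retroactively
}

int main() {
  return_from_interpreter();
  deliver_exception();
}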
@ -113,6 +113,7 @@ class InterpreterRuntime: AllStatic {
 
   // Safepoints
   static void at_safepoint(JavaThread* thread);
+  static void at_unwind(JavaThread* thread);
 
   // Debugger support
   static void post_field_access(JavaThread *thread, oopDesc* obj,
@ -258,9 +258,6 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
   ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt);
 
   if (jt->has_last_Java_frame()) {
-    // traverse the registered growable array gc_array
-    // can't do this as it is not reachable from outside
-
     // Traverse the monitor chunks
     MonitorChunk* chunk = jt->monitor_chunks();
     for (; chunk != NULL; chunk = chunk->next()) {
@ -272,7 +269,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
     }
 
     // Traverse the execution stack
-    for (StackFrameStream fst(jt); !fst.is_done(); fst.next()) {
+    for (StackFrameStream fst(jt, true /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
       fst.current()->oops_do(&rcl, NULL, fst.register_map());
     }
 
@ -297,7 +294,6 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
   // around using this function
   /*
   * // can't reach these oop* from the outside
-  f->do_oop((oop*) &_threadObj);
   f->do_oop((oop*) &_vm_result);
   f->do_oop((oop*) &_exception_oop);
   f->do_oop((oop*) &_pending_async_exception);
@ -36,7 +36,7 @@
 
 bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) {
   assert(top_frame.cb() != NULL, "invariant");
-  RegisterMap map(_thread, false);
+  RegisterMap map(_thread, false, false);
   frame candidate = top_frame;
   for (u4 i = 0; i < MAX_STACK_DEPTH * 2; ++i) {
     if (candidate.is_entry_frame()) {
@ -134,7 +134,7 @@ void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
 class vframeStreamSamples : public vframeStreamCommon {
 public:
   // constructor that starts with sender of frame fr (top_frame)
-  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
+  vframeStreamSamples(JavaThread *jt, frame fr, bool stop_at_java_call_stub) : vframeStreamCommon(jt, false /* process_frames */) {
     _stop_at_java_call_stub = stop_at_java_call_stub;
     _frame = fr;
 
@ -233,7 +233,7 @@ void JfrStackTrace::resolve_linenos() const {
 
 bool JfrStackTrace::record_safe(JavaThread* thread, int skip) {
   assert(thread == Thread::current(), "Thread stack needs to be walkable");
-  vframeStream vfs(thread);
+  vframeStream vfs(thread, false /* stop_at_java_call_stub */, false /* process_frames */);
   u4 count = 0;
   _reached_root = true;
   for (int i = 0; i < skip; i++) {
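Each stack walker now states explicitly whether it may trigger deferred frame processing; profilers and samplers pass false because they can run at points where processing is unsafe. A standalone model of threading such a flag through a walker; names are illustrative, not the HotSpot API.

// --- illustrative sketch, not part of this patch ---
#include <cstdio>

void process_pending_frames() { std::printf("processing deferred frames\n"); }

// A walker that may fix up (process) frames must say so up front; a
// read-only observer, such as a sampler, must not trigger processing.
struct FrameWalker {
  explicit FrameWalker(bool process_frames) {
    if (process_frames) {
      process_pending_frames();  // make all frames safe before walking
    }
  }
  void walk() { std::printf("walking frames\n"); }
};

int main() {
  FrameWalker gc_walker(true /* process_frames */);       // may fix frames
  gc_walker.walk();
  FrameWalker sampler_walker(false /* process_frames */); // observe only
  sampler_walker.walk();
}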
@ -1223,7 +1223,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job
   HotSpotJVMCI::HotSpotStackFrameReference::klass()->initialize(CHECK_NULL);
   Handle frame_reference = HotSpotJVMCI::HotSpotStackFrameReference::klass()->allocate_instance_handle(CHECK_NULL);
 
-  StackFrameStream fst(thread);
+  StackFrameStream fst(thread, true /* update */, true /* process_frames */);
   jobjectArray methods = initial_methods;
 
   int frame_number = 0;
@ -1330,7 +1330,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job
         if (HotSpotJVMCI::HotSpotStackFrameReference::objectsMaterialized(JVMCIENV, frame_reference()) == JNI_TRUE) {
           // the frame has been deoptimized, we need to re-synchronize the frame and vframe
           intptr_t* stack_pointer = (intptr_t*) HotSpotJVMCI::HotSpotStackFrameReference::stackPointer(JVMCIENV, frame_reference());
-          fst = StackFrameStream(thread);
+          fst = StackFrameStream(thread, true /* update */, true /* process_frames */);
           while (fst.current()->sp() != stack_pointer && !fst.is_done()) {
             fst.next();
           }
@ -1462,7 +1462,7 @@ C2V_VMENTRY(void, materializeVirtualObjects, (JNIEnv* env, jobject, jobject _hs_
   JVMCIENV->HotSpotStackFrameReference_initialize(JVMCI_CHECK);
 
   // look for the given stack frame
-  StackFrameStream fst(thread, false);
+  StackFrameStream fst(thread, false /* update */, true /* process_frames */);
   intptr_t* stack_pointer = (intptr_t*) JVMCIENV->get_HotSpotStackFrameReference_stackPointer(hs_frame);
   while (fst.current()->sp() != stack_pointer && !fst.is_done()) {
     fst.next();
@ -1480,7 +1480,7 @@ C2V_VMENTRY(void, materializeVirtualObjects, (JNIEnv* env, jobject, jobject _hs_
   }
   Deoptimization::deoptimize(thread, *fst.current(), Deoptimization::Reason_none);
   // look for the frame again as it has been updated by deopt (pc, deopt state...)
-  StackFrameStream fstAfterDeopt(thread);
+  StackFrameStream fstAfterDeopt(thread, true /* update */, true /* process_frames */);
   while (fstAfterDeopt.current()->sp() != stack_pointer && !fstAfterDeopt.is_done()) {
     fstAfterDeopt.next();
   }
@ -328,7 +328,7 @@
                                                                           \
   nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer)                  \
   nonstatic_field(Thread, _allocated_bytes, jlong)                        \
-  nonstatic_field(Thread, _polling_page, volatile void*)                  \
+  nonstatic_field(Thread, _poll_data, SafepointMechanism::ThreadData)     \
                                                                           \
   nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*)              \
   nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*)                \
@ -340,6 +340,9 @@
   nonstatic_field(ThreadLocalAllocBuffer, _fast_refill_waste, unsigned)   \
   nonstatic_field(ThreadLocalAllocBuffer, _slow_allocations, unsigned)    \
                                                                           \
+  nonstatic_field(SafepointMechanism::ThreadData, _polling_word, volatile uintptr_t) \
+  nonstatic_field(SafepointMechanism::ThreadData, _polling_page, volatile uintptr_t) \
+                                                                          \
   nonstatic_field(ThreadShadow, _pending_exception, oop)                  \
                                                                           \
   static_field(vmSymbols, _symbols[0], Symbol*)                           \
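The per-thread poll data now carries both a polling page (for load polls) and a polling word that doubles as a stack watermark; the return poll compares the frame's SP against it, matching the AArch64 epilog earlier in this commit (cmp sp, then branch-if-higher to the slow path). A standalone model of that comparison; field semantics here are illustrative, not HotSpot code.

// --- illustrative sketch, not part of this patch ---
#include <cstdint>
#include <cstdio>

// Per-thread poll data: the polling word is either an "always slow"
// sentinel or the current stack watermark address.
struct ThreadData {
  volatile uintptr_t polling_word;
  volatile uintptr_t polling_page;
};

// Return poll: with a downward-growing stack, sp above the watermark
// means the frame being returned into has not been processed yet.
bool return_poll_slow_path(const ThreadData& td, uintptr_t sp) {
  return sp > td.polling_word;
}

int main() {
  ThreadData td{0x7000, 0x1000};
  std::printf("sp=0x6f00 -> slow? %d\n", return_poll_slow_path(td, 0x6f00));  // 0
  std::printf("sp=0x7100 -> slow? %d\n", return_poll_slow_path(td, 0x7100));  // 1
}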
@ -154,6 +154,7 @@
   LOG_TAG(sealed) \
   LOG_TAG(setting) \
   LOG_TAG(smr) \
+  LOG_TAG(stackbarrier) \
   LOG_TAG(stackmap) \
   LOG_TAG(stacktrace) \
   LOG_TAG(stackwalk) \
@ -224,6 +224,72 @@ public:
 
 };
 
+volatile int C2SafepointPollStubTable::_stub_size = 0;
+
+Label& C2SafepointPollStubTable::add_safepoint(uintptr_t safepoint_offset) {
+  C2SafepointPollStub* entry = new (Compile::current()->comp_arena()) C2SafepointPollStub(safepoint_offset);
+  _safepoints.append(entry);
+  return entry->_stub_label;
+}
+
+void C2SafepointPollStubTable::emit(CodeBuffer& cb) {
+  MacroAssembler masm(&cb);
+  for (int i = _safepoints.length() - 1; i >= 0; i--) {
+    // Make sure there is enough space in the code buffer
+    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
+      ciEnv::current()->record_failure("CodeCache is full");
+      return;
+    }
+
+    C2SafepointPollStub* entry = _safepoints.at(i);
+    emit_stub(masm, entry);
+  }
+}
+
+int C2SafepointPollStubTable::stub_size_lazy() const {
+  int size = Atomic::load(&_stub_size);
+
+  if (size != 0) {
+    return size;
+  }
+
+  Compile* const C = Compile::current();
+  BufferBlob* const blob = C->output()->scratch_buffer_blob();
+  CodeBuffer cb(blob->content_begin(), C->output()->scratch_buffer_code_size());
+  MacroAssembler masm(&cb);
+  C2SafepointPollStub* entry = _safepoints.at(0);
+  emit_stub(masm, entry);
+  size += cb.insts_size();
+
+  Atomic::store(&_stub_size, size);
+
+  return size;
+}
+
+int C2SafepointPollStubTable::estimate_stub_size() const {
+  if (_safepoints.length() == 0) {
+    return 0;
+  }
+
+  int result = stub_size_lazy() * _safepoints.length();
+
+#ifdef ASSERT
+  Compile* const C = Compile::current();
+  BufferBlob* const blob = C->output()->scratch_buffer_blob();
+  int size = 0;
+
+  for (int i = _safepoints.length() - 1; i >= 0; i--) {
+    CodeBuffer cb(blob->content_begin(), C->output()->scratch_buffer_code_size());
+    MacroAssembler masm(&cb);
+    C2SafepointPollStub* entry = _safepoints.at(i);
+    emit_stub(masm, entry);
+    size += cb.insts_size();
+  }
+  assert(size == result, "stubs should not have variable size");
+#endif
+
+  return result;
+}
+
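stub_size_lazy() above measures one stub, caches the result in an atomic, and multiplies, since all poll stubs assemble to the same size. A standalone model of that benign-race cache; the measured value is made up for illustration.

// --- illustrative sketch, not part of this patch ---
#include <atomic>
#include <cstdio>

std::atomic<int> cached_size{0};

int measure_once() {
  std::printf("expensive measurement\n");
  return 16;  // pretend each stub assembles to 16 bytes
}

// The first caller pays for the measurement; later callers read the cache.
// Two racing first callers both store the same value, so the race is benign.
int stub_size_lazy() {
  int size = cached_size.load(std::memory_order_relaxed);
  if (size != 0) {
    return size;
  }
  size = measure_once();
  cached_size.store(size, std::memory_order_relaxed);
  return size;
}

int main() {
  std::printf("total for 5 stubs: %d\n", stub_size_lazy() * 5);  // measures
  std::printf("total for 3 stubs: %d\n", stub_size_lazy() * 3);  // cached
}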
 PhaseOutput::PhaseOutput()
   : Phase(Phase::Output),
@ -1235,6 +1301,7 @@ CodeBuffer* PhaseOutput::init_buffer() {
 
   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
   stub_req += bs->estimate_stub_size();
+  stub_req += safepoint_poll_table()->estimate_stub_size();
 
   // nmethod and CodeBuffer count stubs & constants as part of method's code.
   // class HandlerImpl is platform-specific and defined in the *.ad files.
@ -1737,6 +1804,10 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   bs->emit_stubs(*cb);
   if (C->failing())  return;
 
+  // Fill in stubs for calling the runtime from safepoint polls.
+  safepoint_poll_table()->emit(*cb);
+  if (C->failing())  return;
+
 #ifndef PRODUCT
   // Information on the size of the method, without the extraneous code
   Scheduling::increment_method_size(cb->insts_size());
@ -25,11 +25,13 @@
 #ifndef SHARE_OPTO_OUTPUT_HPP
 #define SHARE_OPTO_OUTPUT_HPP
 
+#include "code/debugInfo.hpp"
+#include "code/exceptionHandlerTable.hpp"
+#include "metaprogramming/enableIf.hpp"
 #include "opto/ad.hpp"
 #include "opto/constantTable.hpp"
 #include "opto/phase.hpp"
-#include "code/debugInfo.hpp"
-#include "code/exceptionHandlerTable.hpp"
+#include "runtime/vm_version.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 
@ -70,6 +72,47 @@ public:
   { };
 };
 
+class C2SafepointPollStubTable {
+private:
+  struct C2SafepointPollStub: public ResourceObj {
+    uintptr_t _safepoint_offset;
+    Label     _stub_label;
+    Label     _trampoline_label;
+    C2SafepointPollStub(uintptr_t safepoint_offset) :
+        _safepoint_offset(safepoint_offset),
+        _stub_label(),
+        _trampoline_label() {}
+  };
+
+  GrowableArray<C2SafepointPollStub*> _safepoints;
+
+  static volatile int _stub_size;
+
+  void emit_stub_impl(MacroAssembler& masm, C2SafepointPollStub* entry) const;
+
+  // The selection logic below relieves the need to add dummy files to unsupported platforms.
+  template <bool enabled>
+  typename EnableIf<enabled>::type
+  select_emit_stub(MacroAssembler& masm, C2SafepointPollStub* entry) const {
+    emit_stub_impl(masm, entry);
+  }
+
+  template <bool enabled>
+  typename EnableIf<!enabled>::type
+  select_emit_stub(MacroAssembler& masm, C2SafepointPollStub* entry) const {}
+
+  void emit_stub(MacroAssembler& masm, C2SafepointPollStub* entry) const {
+    select_emit_stub<VM_Version::supports_stack_watermark_barrier()>(masm, entry);
+  }
+
+  int stub_size_lazy() const;
+
+public:
+  Label& add_safepoint(uintptr_t safepoint_offset);
+  int estimate_stub_size() const;
+  void emit(CodeBuffer& cb);
+};
+
class PhaseOutput : public Phase {
|
class PhaseOutput : public Phase {
|
||||||
private:
|
private:
|
||||||
// Instruction bits passed off to the VM
|
// Instruction bits passed off to the VM
|
||||||
|
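
The EnableIf selection above is a standard SFINAE idiom: exactly one select_emit_stub overload is instantiable for a given compile-time capability value, so ports without the stack watermark barrier never need to supply even a dummy emit_stub_impl. A self-contained sketch of the same idiom, using std::enable_if_t where HotSpot uses its own EnableIf metafunction; Platform and emit_stub_impl here are hypothetical stand-ins.

    #include <cstdio>
    #include <type_traits>

    struct Platform {
      // Mirrors Abstract_VM_Version::supports_stack_watermark_barrier():
      // false by default, overridden to true on implementing ports.
      constexpr static bool supports_stack_watermark_barrier() { return true; }
    };

    void emit_stub_impl() {  // stands in for the platform-specific emitter
      std::puts("emitting safepoint poll stub");
    }

    // Instantiable only when the capability is present at compile time...
    template <bool enabled>
    std::enable_if_t<enabled> select_emit_stub() { emit_stub_impl(); }

    // ...and a no-op otherwise, so no dummy files on unsupported platforms.
    template <bool enabled>
    std::enable_if_t<!enabled> select_emit_stub() {}

    int main() {
      select_emit_stub<Platform::supports_stack_watermark_barrier()>();
    }
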
@@ -78,6 +121,7 @@ private:
   int _first_block_size;                // Size of unvalidated entry point code / OSR poison code
   ExceptionHandlerTable _handler_table; // Table of native-code exception handlers
   ImplicitExceptionTable _inc_table;    // Table of implicit null checks in native code
+  C2SafepointPollStubTable _safepoint_poll_table; // Table for safepoint polls
   OopMapSet* _oop_map_set;              // Table of oop maps (one for each safepoint location)
   BufferBlob* _scratch_buffer_blob;     // For temporary code buffers.
   relocInfo* _scratch_locs_memory;      // For temporary code buffers.
@@ -126,6 +170,9 @@ public:
   // Constant table
   ConstantTable& constant_table() { return _constant_table; }
 
+  // Safepoint poll table
+  C2SafepointPollStubTable* safepoint_poll_table() { return &_safepoint_poll_table; }
+
   // Code emission iterator
   Block* block() { return _block; }
   int index() { return _index; }
@@ -173,6 +220,7 @@ public:
   void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
   relocInfo* scratch_locs_memory() { return _scratch_locs_memory; }
   void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }
+  int scratch_buffer_code_size() { return (address)scratch_locs_memory() - _scratch_buffer_blob->content_begin(); }
 
   // emit to scratch blob, report resulting size
   uint scratch_emit_size(const Node* n);
@@ -68,6 +68,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
@@ -1286,7 +1287,6 @@ static void trace_exception(outputStream* st, oop exception_oop, address excepti
 // directly from compiled code. Compiled code will call the C++ method following.
 // We can't allow async exception to be installed during exception processing.
 JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* thread, nmethod* &nm))
 
 // Do not confuse exception_oop with pending_exception. The exception_oop
 // is only used to pass arguments into the method. Not for general
 // exception handling. DO NOT CHANGE IT to use pending_exception, since
@@ -1464,6 +1464,11 @@ address OptoRuntime::handle_exception_C(JavaThread* thread) {
 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
 //
 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
+  // The frame we rethrow the exception to might not have been processed by the GC yet.
+  // The stack watermark barrier takes care of detecting that and ensuring the frame
+  // has updated oops.
+  StackWatermarkSet::after_unwind(thread);
+
 #ifndef PRODUCT
   SharedRuntime::_rethrow_ctr++;               // count rethrows
 #endif
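
The after_unwind call encodes the JEP 376 invariant that a frame may only be exposed once its oops are current. A self-contained toy model of the rule the rethrow path relies on (ToyThread and the single linear watermark are illustrative simplifications, not the StackWatermarkSet API):

    #include <cstdint>
    #include <cstdio>

    struct ToyThread {
      std::uintptr_t watermark;  // frames with sp below this have been processed
    };

    bool frame_is_safe(const ToyThread& t, std::uintptr_t sp) {
      return sp < t.watermark;
    }

    // Plays the role of StackWatermarkSet::after_unwind(thread): make the frame
    // we just unwound into safe before anyone touches its oops.
    void after_unwind(ToyThread& t, std::uintptr_t caller_sp) {
      if (!frame_is_safe(t, caller_sp)) {
        std::printf("processing deferred frame at sp=%#lx\n", (unsigned long)caller_sp);
        t.watermark = caller_sp + 1;  // processing now covers this frame
      }
    }

    int main() {
      ToyThread t{0x1000};
      after_unwind(t, 0x2000);  // rethrow unwound into an unprocessed caller frame
    }
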
@@ -91,7 +91,7 @@ static bool is_decipherable_interpreted_frame(JavaThread* thread,
 
 vframeStreamForte::vframeStreamForte(JavaThread *jt,
                                      frame fr,
-                                     bool stop_at_java_call_stub) : vframeStreamCommon(jt) {
+                                     bool stop_at_java_call_stub) : vframeStreamCommon(jt, false /* process_frames */) {
 
   _stop_at_java_call_stub = stop_at_java_call_stub;
   _frame  = fr;
@@ -215,7 +215,7 @@ class EnterInterpOnlyModeClosure : public HandshakeClosure {
     // interpreted-only mode is enabled the first time for a given
     // thread (nothing to do if no Java frames yet).
     ResourceMark resMark;
-    for (StackFrameStream fst(jt, false); !fst.is_done(); fst.next()) {
+    for (StackFrameStream fst(jt, false /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
       if (fst.current()->can_be_deoptimized()) {
         Deoptimization::deoptimize(jt, *fst.current());
       }
@@ -29,6 +29,7 @@
 #include "logging/logConfiguration.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiTrace.hpp"
+#include "runtime/thread.inline.hpp"
 
 //
 // class JvmtiTrace
@@ -36,6 +36,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -480,6 +481,11 @@ jint StackWalk::fetchNextBatch(Handle stackStream, jlong mode, jlong magic,
 
   BaseFrameStream& stream = (*existing_stream);
   if (!stream.at_end()) {
+    // If we have to get back here for even more frames, then 1) the user did not supply
+    // an accurate hint suggesting the depth of the stack walk, and 2) we are not just
+    // peeking at a few frames. Take the cost of flushing out any pending deferred GC
+    // processing of the stack.
+    StackWatermarkSet::finish_processing(jt, NULL /* context */, StackWatermarkKind::gc);
     stream.next(); // advance past the last frame decoded in previous batch
     if (!stream.at_end()) {
       int n = fill_in_frames(mode, stream, frame_count, start_index,
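
The comment spells out a batching trade-off: the first batch stays cheap by leaving deferred GC stack processing pending, and only a caller that keeps asking for more frames pays to finish it. A self-contained sketch of that policy (WalkState and the functions are stand-ins for the StackWalk/StackWatermarkSet machinery):

    #include <cstdio>

    struct WalkState {
      int  next_frame = 0;
      bool deferred_done = false;
    };

    void finish_processing(WalkState& s) {  // stands in for StackWatermarkSet::finish_processing
      if (!s.deferred_done) {
        std::puts("flushing deferred GC processing of the stack");
        s.deferred_done = true;
      }
    }

    int fetch_next_batch(WalkState& s, int batch) {
      if (s.next_frame > 0) {
        // Re-entry means the caller's depth hint was too small; pay the cost now.
        finish_processing(s);
      }
      int first = s.next_frame;
      s.next_frame += batch;
      return first;
    }

    int main() {
      WalkState s;
      fetch_next_batch(s, 8);  // first batch: no flush
      fetch_next_batch(s, 8);  // second batch: deferred processing flushed
    }
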
@@ -857,7 +857,7 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
   void doit() {
     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       if (t->has_last_Java_frame()) {
-        for (StackFrameStream fst(t, false); !fst.is_done(); fst.next()) {
+        for (StackFrameStream fst(t, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
           frame* f = fst.current();
           if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) {
             Deoptimization::deoptimize(t, *f);
@@ -185,6 +185,9 @@ class Abstract_VM_Version: AllStatic {
   // Does platform support fast class initialization checks for static methods?
   static bool supports_fast_class_init_checks() { return false; }
 
+  // Does platform support stack watermark barriers for concurrent stack processing?
+  constexpr static bool supports_stack_watermark_barrier() { return false; }
+
   static bool print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]);
 };
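
Because the new query is constexpr, templates such as C2SafepointPollStubTable::select_emit_stub can branch on it at compile time, and ports opt in by shadowing the default. A sketch of that capability-flag pattern with hypothetical model classes (real ports override the function in their own VM_Version headers):

    struct Abstract_VM_Version_Model {
      constexpr static bool supports_stack_watermark_barrier() { return false; }
    };

    struct Port_VM_Version_Model : Abstract_VM_Version_Model {
      // An implementing port shadows the shared default with a constexpr true.
      constexpr static bool supports_stack_watermark_barrier() { return true; }
    };

    static_assert(!Abstract_VM_Version_Model::supports_stack_watermark_barrier(),
                  "shared default is off");
    static_assert(Port_VM_Version_Model::supports_stack_watermark_barrier(),
                  "a port opts in");

    int main() {}
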
@@ -61,6 +61,7 @@
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadSMR.hpp"
@@ -160,6 +161,13 @@ JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(
   }
   thread->inc_in_deopt_handler();
 
+  if (exec_mode == Unpack_exception) {
+    // When we get here, a callee has thrown an exception into a deoptimized
+    // frame. That throw might have deferred stack watermark checking until
+    // after unwinding. So we deal with such deferred requests here.
+    StackWatermarkSet::after_unwind(thread);
+  }
+
   return fetch_unroll_info_helper(thread, exec_mode);
 JRT_END
@@ -254,6 +262,10 @@ static void eliminate_locks(JavaThread* thread, GrowableArray<compiledVFrame*>*
 
 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) {
+  // When we get here we are about to unwind the deoptee frame. In order to
+  // catch not yet safe to use frames, the following stack watermark barrier
+  // poll will make such frames safe to use.
+  StackWatermarkSet::before_unwind(thread);
+
   // Note: there is a safepoint safety issue here. No matter whether we enter
   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
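
Taken together with the Unpack_exception case above, the two calls bracket frame removal from opposite sides: before_unwind makes the caller frame safe before control returns into it, while after_unwind repairs a frame that an exceptional unwind already landed in. A compact sketch of that pairing (plain free functions standing in for the StackWatermarkSet entry points):

    #include <cstdio>

    void before_unwind() { std::puts("make caller frame safe prior to unwind"); }
    void after_unwind()  { std::puts("repair current frame after exceptional unwind"); }

    void deoptimize_normal_path() {
      before_unwind();   // poll before popping the deoptee frame
      // ... pop frame, return into the (now safe) caller ...
    }

    void deoptimize_exception_path() {
      // ... a callee threw into the deoptimized frame; unwind already happened ...
      after_unwind();    // handle the deferred request now
    }

    int main() {
      deoptimize_normal_path();
      deoptimize_exception_path();
    }
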
@@ -1510,7 +1522,7 @@ static void get_monitors_from_stack(GrowableArray<Handle>* objects_to_revoke, Ja
   // the places we want to call this routine so we need to walk the
   // stack again to update the register map.
   if (map == NULL || !map->update_map()) {
-    StackFrameStream sfs(thread, true);
+    StackFrameStream sfs(thread, true /* update */, true /* process_frames */);
     bool found = false;
     while (!found && !sfs.is_done()) {
       frame* cur = sfs.current();
@@ -53,9 +53,10 @@
 #include "utilities/decoder.hpp"
 #include "utilities/formatBuffer.hpp"
 
-RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
+RegisterMap::RegisterMap(JavaThread *thread, bool update_map, bool process_frames) {
   _thread = thread;
   _update_map = update_map;
+  _process_frames = process_frames;
   clear();
   debug_only(_update_for_id = NULL;)
 #ifndef PRODUCT
@@ -68,6 +69,7 @@ RegisterMap::RegisterMap(const RegisterMap* map) {
   assert(map != NULL, "RegisterMap must be present");
   _thread                = map->thread();
   _update_map            = map->update_map();
+  _process_frames        = map->process_frames();
   _include_argument_oops = map->include_argument_oops();
   debug_only(_update_for_id = map->_update_for_id;)
   pd_initialize_from(map);
@@ -896,10 +898,11 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver,
   finder.oops_do();
 }
 
-void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) const {
+void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map,
+                              DerivedPointerIterationMode derived_mode) const {
   assert(_cb != NULL, "sanity check");
   if (_cb->oop_maps() != NULL) {
-    OopMapSet::oops_do(this, reg_map, f);
+    OopMapSet::oops_do(this, reg_map, f, derived_mode);
 
     // Preserve potential arguments for a callee. We handle this by dispatching
     // on the codeblob. For c2i, we do
@@ -1035,8 +1038,19 @@ void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) const {
   entry_frame_call_wrapper()->oops_do(f);
 }
 
-void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map, bool use_interpreter_oop_map_cache) const {
+void frame::oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
+                    DerivedPointerIterationMode derived_mode) const {
+  oops_do_internal(f, cf, map, true, derived_mode);
+}
+
+void frame::oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) const {
+  oops_do_internal(f, cf, map, true, DerivedPointerTable::is_active() ?
+                                     DerivedPointerIterationMode::_with_table :
+                                     DerivedPointerIterationMode::_ignore);
+}
+
+void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
+                             bool use_interpreter_oop_map_cache, DerivedPointerIterationMode derived_mode) const {
 #ifndef PRODUCT
   // simulate GC crash here to dump java thread in error report
   if (CrashGCForDumpingJavaThread) {
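
The three-argument oops_do lets GC callers pin down how derived pointers are visited, while the two-argument overload keeps the old behavior by picking a mode from the table's state. A self-contained sketch of that selection (DerivedPointerTableModel is a stand-in; the enum mirrors the one added in frame.hpp below):

    #include <cstdio>

    enum class DerivedPointerIterationMode { _with_table, _directly, _ignore };

    struct DerivedPointerTableModel {
      static bool is_active() { return true; }  // in HotSpot this depends on the GC
    };

    DerivedPointerIterationMode default_mode() {
      return DerivedPointerTableModel::is_active()
           ? DerivedPointerIterationMode::_with_table
           : DerivedPointerIterationMode::_ignore;
    }

    int main() {
      switch (default_mode()) {
        case DerivedPointerIterationMode::_with_table:
          std::puts("record derived pointers in the table"); break;
        case DerivedPointerIterationMode::_directly:
          std::puts("adjust derived pointers in place"); break;
        case DerivedPointerIterationMode::_ignore:
          std::puts("skip derived pointers"); break;
      }
    }
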
@@ -1049,7 +1063,7 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterM
   } else if (is_entry_frame()) {
     oops_entry_do(f, map);
   } else if (CodeCache::contains(pc())) {
-    oops_code_blob_do(f, cf, map);
+    oops_code_blob_do(f, cf, map, derived_mode);
   } else {
     ShouldNotReachHere();
   }
@@ -1086,7 +1100,7 @@ void frame::verify(const RegisterMap* map) const {
 #if COMPILER2_OR_JVMCI
   assert(DerivedPointerTable::is_empty(), "must be empty before verify");
 #endif
-  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, map, false);
+  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, map, false, DerivedPointerIterationMode::_ignore);
 }
@@ -1219,7 +1233,7 @@ void frame::describe(FrameValues& values, int frame_no) {
 //-----------------------------------------------------------------------------------
 // StackFrameStream implementation
 
-StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
+StackFrameStream::StackFrameStream(JavaThread *thread, bool update, bool process_frames) : _reg_map(thread, update, process_frames) {
   assert(thread->has_last_Java_frame(), "sanity check");
   _fr = thread->last_frame();
   _is_done = false;
@@ -41,6 +41,11 @@ class FrameValues;
 class vframeArray;
 class JavaCallWrapper;
 
+enum class DerivedPointerIterationMode {
+  _with_table,
+  _directly,
+  _ignore
+};
+
 // A frame represents a physical stack frame (an activation). Frames
 // can be C or Java frames, and the Java frames can be interpreted or
@@ -366,13 +371,17 @@ class frame {
   void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const;
 
   // Iteration of oops
-  void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map, bool use_interpreter_oop_map_cache) const;
+  void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
+                        bool use_interpreter_oop_map_cache, DerivedPointerIterationMode derived_mode) const;
   void oops_entry_do(OopClosure* f, const RegisterMap* map) const;
-  void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) const;
+  void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
+                         DerivedPointerIterationMode derived_mode) const;
   int adjust_offset(Method* method, int index); // helper for above fn
 public:
   // Memory management
-  void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) const { oops_do_internal(f, cf, map, true); }
+  void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
+               DerivedPointerIterationMode derived_mode) const;
+  void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) const;
   void nmethods_do(CodeBlobClosure* cf) const;
 
   // RedefineClasses support for finding live interpreted methods on the stack
|
||||||
//
|
//
|
||||||
// StackFrameStream iterates through the frames of a thread starting from
|
// StackFrameStream iterates through the frames of a thread starting from
|
||||||
// top most frame. It automatically takes care of updating the location of
|
// top most frame. It automatically takes care of updating the location of
|
||||||
// all (callee-saved) registers. Notice: If a thread is stopped at
|
// all (callee-saved) registers iff the update flag is set. It also
|
||||||
// a safepoint, all registers are saved, not only the callee-saved ones.
|
// automatically takes care of lazily applying deferred GC processing
|
||||||
|
// onto exposed frames, such that all oops are valid iff the process_frames
|
||||||
|
// flag is set.
|
||||||
|
//
|
||||||
|
// Notice: If a thread is stopped at a safepoint, all registers are saved,
|
||||||
|
// not only the callee-saved ones.
|
||||||
//
|
//
|
||||||
// Use:
|
// Use:
|
||||||
//
|
//
|
||||||
// for(StackFrameStream fst(thread); !fst.is_done(); fst.next()) {
|
// for(StackFrameStream fst(thread, true /* update */, true /* process_frames */);
|
||||||
|
// !fst.is_done();
|
||||||
|
// fst.next()) {
|
||||||
// ...
|
// ...
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
|
@ -454,7 +470,7 @@ class StackFrameStream : public StackObj {
|
||||||
RegisterMap _reg_map;
|
RegisterMap _reg_map;
|
||||||
bool _is_done;
|
bool _is_done;
|
||||||
public:
|
public:
|
||||||
StackFrameStream(JavaThread *thread, bool update = true);
|
StackFrameStream(JavaThread *thread, bool update, bool process_frames);
|
||||||
|
|
||||||
// Iteration
|
// Iteration
|
||||||
inline bool is_done();
|
inline bool is_done();
|
||||||
|
|
|
@@ -30,6 +30,7 @@
 #include "runtime/handshake.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/stackWatermarkSet.hpp"
 #include "runtime/task.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
@@ -520,6 +521,10 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
     pr_ret = HandshakeState::_succeeded;
   }
 
+  if (!_handshakee->is_terminated()) {
+    StackWatermarkSet::start_processing(_handshakee, StackWatermarkKind::gc);
+  }
+
   _active_handshaker = current_thread;
   op->do_handshake(_handshakee);
   _active_handshaker = NULL;
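
The guard keeps a handshake operation from observing stale frames: before the operation runs on the target, the target's deferred stack processing is started, unless the thread has already terminated and has no frames left to fix. A short sketch of the control flow (stand-in types, not the HandshakeState API):

    #include <cstdio>

    struct HandshakeeModel {
      bool terminated;
      void start_stack_processing() {
        std::puts("start watermark processing for handshakee");
      }
    };

    void try_process(HandshakeeModel& h) {
      if (!h.terminated) {
        h.start_stack_processing();  // analogous to StackWatermarkSet::start_processing
      }
      std::puts("op->do_handshake(handshakee)");
    }

    int main() {
      HandshakeeModel alive{false};
      try_process(alive);
    }
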