8254315: Shenandoah: Concurrent weak reference processing

Reviewed-by: zgu, shade
Author: Roman Kennke, 2020-11-03 18:58:46 +00:00
parent 83f3cf4298
commit f64a15d62e
55 changed files with 1864 additions and 802 deletions
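
The change that repeats across the interpreter, C1, and C2 hunks below is the replacement of the two-valued bool weak / is_native flag on load-reference barriers with the three-valued ShenandoahBarrierSet::AccessKind enum (NORMAL, WEAK, NATIVE), plus the retirement of the per-cycle reference-processing heuristics in favor of a dedicated ShenandoahReferenceProcessor. A minimal standalone C++ sketch of the dispatch shape the barrier code repeats; pick_entry is a hypothetical helper, and the strings name the real ShenandoahRuntime entries from the hunks below:

    #include <cstdio>

    // Mirrors ShenandoahBarrierSet::AccessKind introduced by this commit.
    enum class AccessKind { NORMAL, NATIVE, WEAK };

    // Sketch of the per-kind runtime-entry selection the assembler/C1/C2 code
    // repeats: NORMAL takes the strong entry (narrow variant under compressed
    // oops), WEAK takes the weak entry, NATIVE reuses the weak entry and has
    // no narrow variant.
    const char* pick_entry(AccessKind kind, bool use_compressed_oops) {
      switch (kind) {
        case AccessKind::NORMAL:
          return use_compressed_oops ? "load_reference_barrier_narrow"
                                     : "load_reference_barrier";
        case AccessKind::WEAK:
          return use_compressed_oops ? "load_reference_barrier_weak_narrow"
                                     : "load_reference_barrier_weak";
        case AccessKind::NATIVE:
          return "load_reference_barrier_weak";
      }
      return nullptr; // unreachable; the VM code uses ShouldNotReachHere()
    }

    int main() {
      std::printf("%s\n", pick_entry(AccessKind::WEAK, true));
      return 0;
    }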

@@ -109,7 +109,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
   __ xchg(access.resolved_addr(), value_opr, result, tmp);
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), false);
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), ShenandoahBarrierSet::AccessKind::NORMAL);
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;

@@ -225,7 +225,7 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
   }
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, bool weak) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, ShenandoahBarrierSet::AccessKind kind) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
   assert(dst != rscratch2, "need rscratch2");
   assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);
@@ -252,7 +252,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   __ mov(r0, dst);
 
   // Test for in-cset
-  if (!weak) {
+  if (kind == ShenandoahBarrierSet::AccessKind::NORMAL) {
     __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
     __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
     __ ldrb(rscratch2, Address(rscratch2, rscratch1));
@@ -260,14 +260,26 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   }
 
   __ push_call_clobbered_registers();
-  if (weak) {
-    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
-  } else {
-    if (UseCompressedOops) {
-      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
-    } else {
-      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
-    }
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      if (UseCompressedOops) {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
+      } else {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      if (UseCompressedOops) {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
+      } else {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+      break;
+    default:
+      ShouldNotReachHere();
   }
   __ blr(lr);
   __ mov(rscratch1, r0);
@@ -326,8 +338,8 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
-    bool weak = ShenandoahBarrierSet::use_load_reference_barrier_weak(decorators, type);
-    load_reference_barrier(masm, dst, src, weak);
+    ShenandoahBarrierSet::AccessKind kind = ShenandoahBarrierSet::access_kind(decorators, type);
+    load_reference_barrier(masm, dst, src, kind);
 
     if (dst != result_dst) {
       __ mov(result_dst, dst);
@@ -641,10 +653,18 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
   __ bind(slow_path);
   ce->store_parameter(res, 0);
   ce->store_parameter(addr, 1);
-  if (stub->is_weak()) {
-    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
-  } else {
-    __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
+  switch (stub->kind()) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      __ far_call(RuntimeAddress(bs->load_reference_barrier_normal_rt_code_blob()->code_begin()));
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ far_call(RuntimeAddress(bs->load_reference_barrier_native_rt_code_blob()->code_begin()));
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   __ b(*stub->continuation());
@@ -700,20 +720,34 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
   __ epilogue();
 }
 
-void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool is_weak) {
+void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, ShenandoahBarrierSet::AccessKind kind) {
   __ prologue("shenandoah_load_reference_barrier", false);
   // arg0 : object to be resolved
 
   __ push_call_clobbered_registers();
   __ load_parameter(0, r0);
   __ load_parameter(1, r1);
-  if (is_weak) {
-    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
-  } else if (UseCompressedOops) {
-    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
-  } else {
-    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
-  }
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      if (UseCompressedOops) {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
+      } else {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      if (UseCompressedOops) {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
+      } else {
+        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+      break;
+    default:
+      ShouldNotReachHere();
+  }
   __ blr(lr);
   __ mov(rscratch1, r0);
   __ pop_call_clobbered_registers();

@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #ifdef COMPILER1
 class LIR_Assembler;
 class ShenandoahPreBarrierStub;
@@ -55,7 +56,7 @@ private:
   void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
   void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
-  void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, bool weak);
+  void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, ShenandoahBarrierSet::AccessKind kind);
 
 public:
@@ -65,7 +66,7 @@ public:
   void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
   void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
   void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
-  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool is_weak);
+  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, ShenandoahBarrierSet::AccessKind kind);
 #endif
 
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,

@@ -111,7 +111,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
   __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), false);
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), ShenandoahBarrierSet::AccessKind::NORMAL);
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;

@@ -268,7 +268,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
   __ bind(done);
 }
 
-void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, bool weak) {
+void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, ShenandoahBarrierSet::AccessKind kind) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
 
   Label heap_stable, not_cset;
@@ -292,7 +292,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   __ jcc(Assembler::zero, heap_stable);
 
   Register tmp1 = noreg;
-  if (!weak) {
+  if (kind == ShenandoahBarrierSet::AccessKind::NORMAL) {
     // Test for object in cset
     // Allocate tmp-reg.
     for (int i = 0; i < 8; i++) {
@@ -338,14 +338,26 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   __ lea(tmp2, src);
 
   save_xmm_registers(masm);
-  if (weak) {
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), dst, tmp2);
-  } else {
-    if (UseCompressedOops) {
-      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), dst, tmp2);
-    } else {
-      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), dst, tmp2);
-    }
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      if (UseCompressedOops) {
+        __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), dst, tmp2);
+      } else {
+        __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), dst, tmp2);
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      if (UseCompressedOops) {
+        __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), dst, tmp2);
+      } else {
+        __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), dst, tmp2);
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), dst, tmp2);
+      break;
+    default:
+      ShouldNotReachHere();
   }
   restore_xmm_registers(masm);
@@ -370,7 +382,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
 
   __ bind(not_cset);
-  if (!weak) {
+  if (kind == ShenandoahBarrierSet::AccessKind::NORMAL) {
     __ pop(tmp1);
   }
@@ -467,8 +479,8 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
-    bool weak = ShenandoahBarrierSet::use_load_reference_barrier_weak(decorators, type);
-    load_reference_barrier(masm, dst, src, weak);
+    ShenandoahBarrierSet::AccessKind kind = ShenandoahBarrierSet::access_kind(decorators, type);
+    load_reference_barrier(masm, dst, src, kind);
 
     // Move loaded oop to final destination
     if (dst != result_dst) {
@@ -818,10 +830,18 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
   __ bind(slow_path);
   ce->store_parameter(res, 0);
   ce->store_parameter(addr, 1);
-  if (stub->is_weak()) {
-    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
-  } else {
-    __ call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
+  switch (stub->kind()) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      __ call(RuntimeAddress(bs->load_reference_barrier_normal_rt_code_blob()->code_begin()));
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ call(RuntimeAddress(bs->load_reference_barrier_native_rt_code_blob()->code_begin()));
+      break;
+    default:
+      ShouldNotReachHere();
   }
   __ jmp(*stub->continuation());
 }
@@ -886,7 +906,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
   __ epilogue();
 }
 
-void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool is_weak) {
+void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, ShenandoahBarrierSet::AccessKind kind) {
   __ prologue("shenandoah_load_reference_barrier", false);
   // arg0 : object to be resolved
@@ -895,20 +915,40 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
 #ifdef _LP64
   __ load_parameter(0, c_rarg0);
   __ load_parameter(1, c_rarg1);
-  if (is_weak) {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
-  } else if (UseCompressedOops) {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1);
-  } else {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1);
-  }
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      if (UseCompressedOops) {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1);
+      } else {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1);
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      if (UseCompressedOops) {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
+      } else {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
+      }
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
 #else
   __ load_parameter(0, rax);
   __ load_parameter(1, rbx);
-  if (is_weak) {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
-  } else {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx);
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx);
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
+      break;
+    default:
+      ShouldNotReachHere();
   }
 #endif

@@ -27,6 +27,8 @@
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #ifdef COMPILER1
 class LIR_Assembler;
 class ShenandoahPreBarrierStub;
@@ -62,10 +64,10 @@ public:
   void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
   void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
   void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
-  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool is_weak);
+  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, ShenandoahBarrierSet::AccessKind kind);
 #endif
 
-  void load_reference_barrier(MacroAssembler* masm, Register dst, Address src, bool weak);
+  void load_reference_barrier(MacroAssembler* masm, Register dst, Address src, ShenandoahBarrierSet::AccessKind kind);
 
   void cmpxchg_oop(MacroAssembler* masm,
                    Register res, Address addr, Register oldval, Register newval,

@@ -51,7 +51,9 @@ void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
 ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
   _pre_barrier_c1_runtime_code_blob(NULL),
-  _load_reference_barrier_rt_code_blob(NULL) {}
+  _load_reference_barrier_normal_rt_code_blob(NULL),
+  _load_reference_barrier_native_rt_code_blob(NULL),
+  _load_reference_barrier_weak_rt_code_blob(NULL) {}
 
 void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
   // First we test whether marking is in progress.
@@ -107,15 +109,15 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
   __ branch_destination(slow->continuation());
 }
 
-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool is_native) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, ShenandoahBarrierSet::AccessKind kind) {
   if (ShenandoahLoadRefBarrier) {
-    return load_reference_barrier_impl(gen, obj, addr, is_native);
+    return load_reference_barrier_impl(gen, obj, addr, kind);
   } else {
     return obj;
   }
 }
 
-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool is_native) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, ShenandoahBarrierSet::AccessKind kind) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
 
   obj = ensure_in_register(gen, obj, T_OBJECT);
@@ -148,7 +150,7 @@ LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, L
   }
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
 
-  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, is_native);
+  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, kind);
   __ branch(lir_cond_notEqual, slow);
   __ branch_destination(slow->continuation());
@@ -211,8 +213,8 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result)
   if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
     LIR_Opr tmp = gen->new_register(T_OBJECT);
     BarrierSetC1::load_at_resolved(access, tmp);
-    bool is_weak = ShenandoahBarrierSet::use_load_reference_barrier_weak(decorators, type);
-    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), is_weak);
+    ShenandoahBarrierSet::AccessKind kind = ShenandoahBarrierSet::access_kind(decorators, type);
+    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), kind);
     __ move(tmp, result);
   } else {
     BarrierSetC1::load_at_resolved(access, result);
@@ -251,14 +253,14 @@ class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure
 class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 private:
-  const bool _is_weak;
+  const ShenandoahBarrierSet::AccessKind _kind;
 
 public:
-  C1ShenandoahLoadReferenceBarrierCodeGenClosure(bool is_weak) : _is_weak(is_weak) {}
+  C1ShenandoahLoadReferenceBarrierCodeGenClosure(ShenandoahBarrierSet::AccessKind kind) : _kind(kind) {}
 
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _is_weak);
+    bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _kind);
     return NULL;
   }
 };
@@ -269,12 +271,17 @@ void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob)
                                               "shenandoah_pre_barrier_slow",
                                               false, &pre_code_gen_cl);
   if (ShenandoahLoadRefBarrier) {
-    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl(false);
-    _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl(ShenandoahBarrierSet::AccessKind::NORMAL);
+    _load_reference_barrier_normal_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                               "shenandoah_load_reference_barrier_slow",
                                               false, &lrb_code_gen_cl);
-    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(true);
+    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_native_code_gen_cl(ShenandoahBarrierSet::AccessKind::NATIVE);
+    _load_reference_barrier_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+                                              "shenandoah_load_reference_barrier_native_slow",
+                                              false, &lrb_native_code_gen_cl);
+    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ShenandoahBarrierSet::AccessKind::WEAK);
     _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                               "shenandoah_load_reference_barrier_weak_slow",
                                               false, &lrb_weak_code_gen_cl);
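
C1 now pre-generates one slow-path blob per access kind and the barrier stub dispatches on stub->kind(). A sketch of the same one-blob-per-kind arrangement, assuming plain function pointers in place of the Runtime1-generated code blobs (all helper names below are stand-ins, not HotSpot APIs):

    #include <cassert>

    enum class AccessKind { NORMAL, NATIVE, WEAK };

    using StubFn = const char* (*)();
    static const char* normal_stub() { return "shenandoah_load_reference_barrier_slow"; }
    static const char* native_stub() { return "shenandoah_load_reference_barrier_native_slow"; }
    static const char* weak_stub()   { return "shenandoah_load_reference_barrier_weak_slow"; }

    // One pre-generated stub per kind, selected when the slow path is emitted,
    // as in generate_c1_runtime_stubs / gen_load_reference_barrier_stub above.
    StubFn stub_for(AccessKind kind) {
      switch (kind) {
        case AccessKind::NORMAL: return normal_stub;
        case AccessKind::NATIVE: return native_stub;
        case AccessKind::WEAK:   return weak_stub;
      }
      return nullptr; // unreachable; the VM code uses ShouldNotReachHere()
    }

    int main() {
      assert(stub_for(AccessKind::WEAK)() != nullptr);
      return 0;
    }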

@@ -94,10 +94,10 @@ private:
   LIR_Opr _result;
   LIR_Opr _tmp1;
   LIR_Opr _tmp2;
-  bool _is_weak;
+  ShenandoahBarrierSet::AccessKind _kind;
 
 public:
-  ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2, bool is_weak) :
-    _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2), _is_weak(is_weak)
+  ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2, ShenandoahBarrierSet::AccessKind kind) :
+    _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2), _kind(kind)
   {
     assert(_obj->is_register(), "should be register");
     assert(_addr->is_register(), "should be register");
@@ -111,7 +111,7 @@ public:
   LIR_Opr result() const { return _result; }
   LIR_Opr tmp1() const { return _tmp1; }
   LIR_Opr tmp2() const { return _tmp2; }
-  bool is_weak() const { return _is_weak; }
+  ShenandoahBarrierSet::AccessKind kind() const { return _kind; }
 
   virtual void emit_code(LIR_Assembler* e);
   virtual void visit(LIR_OpVisitState* visitor) {
@@ -190,15 +190,16 @@ public:
 class ShenandoahBarrierSetC1 : public BarrierSetC1 {
 private:
   CodeBlob* _pre_barrier_c1_runtime_code_blob;
-  CodeBlob* _load_reference_barrier_rt_code_blob;
+  CodeBlob* _load_reference_barrier_normal_rt_code_blob;
+  CodeBlob* _load_reference_barrier_native_rt_code_blob;
   CodeBlob* _load_reference_barrier_weak_rt_code_blob;
 
   void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);
-  LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool is_native);
+  LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, ShenandoahBarrierSet::AccessKind kind);
   LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);
 
-  LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool is_native);
+  LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, ShenandoahBarrierSet::AccessKind kind);
 
   LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type);
@@ -210,9 +211,14 @@ public:
     return _pre_barrier_c1_runtime_code_blob;
   }
 
-  CodeBlob* load_reference_barrier_rt_code_blob() {
-    assert(_load_reference_barrier_rt_code_blob != NULL, "");
-    return _load_reference_barrier_rt_code_blob;
+  CodeBlob* load_reference_barrier_normal_rt_code_blob() {
+    assert(_load_reference_barrier_normal_rt_code_blob != NULL, "");
+    return _load_reference_barrier_normal_rt_code_blob;
+  }
+
+  CodeBlob* load_reference_barrier_native_rt_code_blob() {
+    assert(_load_reference_barrier_native_rt_code_blob != NULL, "");
+    return _load_reference_barrier_native_rt_code_blob;
   }
 
   CodeBlob* load_reference_barrier_weak_rt_code_blob() {

@@ -305,7 +305,8 @@ bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
   address entry_point = call->as_CallLeaf()->entry_point();
   return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) ||
          (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)) ||
-         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)) ||
+         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
 }
 
 bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
@@ -545,9 +546,8 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
   // 2: apply LRB if needed
   if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
-    load = new ShenandoahLoadReferenceBarrierNode(NULL,
-                                                  load,
-                                                  ShenandoahBarrierSet::use_load_reference_barrier_weak(decorators, type));
+    ShenandoahBarrierSet::AccessKind kind = ShenandoahBarrierSet::access_kind(decorators, type);
+    load = new ShenandoahLoadReferenceBarrierNode(NULL, load, kind);
     if (access.is_parse_access()) {
       load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
     } else {
@@ -644,7 +644,7 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
       load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
     }
 #endif
-    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store, false));
+    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store, ShenandoahBarrierSet::AccessKind::NORMAL));
     return load_store;
   }
   return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@@ -712,7 +712,7 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
   }
   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
   if (access.is_oop()) {
-    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result, false));
+    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result, ShenandoahBarrierSet::AccessKind::NORMAL));
     shenandoah_write_barrier_pre(kit, false /* do_load */,
                                  NULL, NULL, max_juint, NULL, NULL,
                                  result /* pre_val */, T_OBJECT);
@@ -1060,15 +1060,15 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
     Node* in1 = n->in(1);
     Node* in2 = n->in(2);
 
-    // If one input is NULL, then step over the barriers (except LRB native) on the other input
+    // If one input is NULL, then step over the normal LRB barriers on the other input
     if (in1->bottom_type() == TypePtr::NULL_PTR &&
         !((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
-          ((ShenandoahLoadReferenceBarrierNode*)in2)->is_weak())) {
+          ((ShenandoahLoadReferenceBarrierNode*)in2)->kind() != ShenandoahBarrierSet::AccessKind::NORMAL)) {
       in2 = step_over_gc_barrier(in2);
     }
     if (in2->bottom_type() == TypePtr::NULL_PTR &&
         !((in1->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
-          ((ShenandoahLoadReferenceBarrierNode*)in1)->is_weak())) {
+          ((ShenandoahLoadReferenceBarrierNode*)in1)->kind() != ShenandoahBarrierSet::AccessKind::NORMAL)) {
       in1 = step_over_gc_barrier(in1);
     }

@@ -956,7 +956,8 @@ void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl,
   phase->register_new_node(cset_bool, old_ctrl);
 }
 
-void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_weak, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem,
+                                               ShenandoahBarrierSet::AccessKind kind, PhaseIdealLoop* phase) {
   IdealLoopTree* loop = phase->get_loop(ctrl);
   const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
@@ -967,13 +968,28 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
   mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
   phase->register_new_node(mm, ctrl);
 
-  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
-                   CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
-                   CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
-  address calladdr = is_weak ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak)
-                             : target;
-  const char* name = is_weak ? "load_reference_barrier_native" : "load_reference_barrier";
+  address calladdr = NULL;
+  const char* name = NULL;
+  switch (kind) {
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
+      name = "load_reference_barrier_native";
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      calladdr = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
+                 CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow) :
+                 CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
+      name = "load_reference_barrier_weak";
+      break;
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      calladdr = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
+                 CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) :
+                 CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier);
+      name = "load_reference_barrier";
+      break;
+    default:
+      ShouldNotReachHere();
+  }
   Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
 
   call->init_req(TypeFunc::Control, ctrl);
@@ -1338,7 +1354,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
     // even for non-cset objects to prevent ressurrection of such objects.
     // Wires !in_cset(obj) to slot 2 of region and phis
     Node* not_cset_ctrl = NULL;
-    if (!lrb->is_weak()) {
+    if (lrb->kind() == ShenandoahBarrierSet::AccessKind::NORMAL) {
       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
     }
     if (not_cset_ctrl != NULL) {
@@ -1389,7 +1405,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
         }
       }
     }
-    call_lrb_stub(ctrl, val, addr, result_mem, raw_mem, lrb->is_weak(), phase);
+    call_lrb_stub(ctrl, val, addr, result_mem, raw_mem, lrb->kind(), phase);
     region->init_req(_evac_path, ctrl);
     val_phi->init_req(_evac_path, val);
     raw_mem_phi->init_req(_evac_path, result_mem);
@@ -2885,13 +2901,13 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
   }
 }
 
-ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, bool weak)
-  : Node(ctrl, obj), _weak(weak) {
+ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, ShenandoahBarrierSet::AccessKind kind)
+  : Node(ctrl, obj), _kind(kind) {
   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
 }
 
-bool ShenandoahLoadReferenceBarrierNode::is_weak() const {
-  return _weak;
+ShenandoahBarrierSet::AccessKind ShenandoahLoadReferenceBarrierNode::kind() const {
+  return _kind;
 }
 
 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
@@ -2899,12 +2915,26 @@ uint ShenandoahLoadReferenceBarrierNode::size_of() const {
 }
 
 uint ShenandoahLoadReferenceBarrierNode::hash() const {
-  return Node::hash() + (_weak ? 1 : 0);
+  uint hash = Node::hash();
+  switch (_kind) {
+    case ShenandoahBarrierSet::AccessKind::NORMAL:
+      hash += 0;
+      break;
+    case ShenandoahBarrierSet::AccessKind::WEAK:
+      hash += 1;
+      break;
+    case ShenandoahBarrierSet::AccessKind::NATIVE:
+      hash += 2;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  return hash;
 }
 
 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
-         _weak == ((const ShenandoahLoadReferenceBarrierNode&)n)._weak;
+         _kind == ((const ShenandoahLoadReferenceBarrierNode&)n)._kind;
 }
 
 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
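
Folding _kind into hash() and cmp() keeps C2 value numbering from commoning barriers of different kinds. A minimal sketch (not HotSpot code) of that contract: cmp() decides when two nodes may be merged, and hash() must be consistent with cmp():

    #include <cassert>
    #include <cstddef>
    #include <functional>

    enum class AccessKind { NORMAL, WEAK, NATIVE };

    // Sketch: _kind participates in both identity functions, so a NORMAL and
    // a WEAK barrier over the same inputs can never be value-numbered together.
    struct BarrierNode {
      int input_id;     // stand-in for the node's actual inputs
      AccessKind kind;  // mirrors ShenandoahLoadReferenceBarrierNode::_kind

      size_t hash() const {
        return std::hash<int>()(input_id) + static_cast<size_t>(kind);
      }
      bool cmp(const BarrierNode& n) const {
        return input_id == n.input_id && kind == n.kind;
      }
    };

    int main() {
      BarrierNode a{1, AccessKind::NORMAL};
      BarrierNode b{1, AccessKind::WEAK};
      assert(!a.cmp(b));             // different kinds must not be merged
      assert(a.hash() != b.hash());  // and they hash apart
      return 0;
    }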

@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP
 #define SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP
 
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "memory/allocation.hpp"
 #include "opto/addnode.hpp"
 #include "opto/graphKit.hpp"
@@ -60,7 +61,8 @@ private:
   static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);
   static void test_gc_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                             PhaseIdealLoop* phase, int flags);
-  static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_weak, PhaseIdealLoop* phase);
+  static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem,
+                            ShenandoahBarrierSet::AccessKind kind, PhaseIdealLoop* phase);
   static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
   static void move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
   static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
@@ -229,12 +231,12 @@ public:
   };
 
 private:
-  bool _weak;
+  ShenandoahBarrierSet::AccessKind _kind;
 
 public:
-  ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val, bool native);
-  bool is_weak() const;
+  ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val, ShenandoahBarrierSet::AccessKind kind);
+  ShenandoahBarrierSet::AccessKind kind() const;
 
   virtual int Opcode() const;
   virtual const Type* bottom_type() const;
   virtual const Type* Value(PhaseGVN* phase) const;

@@ -61,12 +61,6 @@ bool ShenandoahAggressiveHeuristics::should_start_gc() const {
   return true;
 }
 
-bool ShenandoahAggressiveHeuristics::should_process_references() {
-  if (!can_process_references()) return false;
-  // Randomly process refs with 50% chance.
-  return (os::random() & 1) == 1;
-}
-
 bool ShenandoahAggressiveHeuristics::should_unload_classes() {
   if (!can_unload_classes_normal()) return false;
   if (has_metaspace_oom()) return true;

@@ -37,8 +37,6 @@ public:
   virtual bool should_start_gc() const;
 
-  virtual bool should_process_references();
-
   virtual bool should_unload_classes();
 
   virtual const char* name() { return "Aggressive"; }

@@ -257,18 +257,6 @@ void ShenandoahHeuristics::record_requested_gc() {
   _gc_times_learned = 0;
 }
 
-bool ShenandoahHeuristics::can_process_references() {
-  if (ShenandoahRefProcFrequency == 0) return false;
-  return true;
-}
-
-bool ShenandoahHeuristics::should_process_references() {
-  if (!can_process_references()) return false;
-  size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
-  // Process references every Nth GC cycle.
-  return cycle % ShenandoahRefProcFrequency == 0;
-}
-
 bool ShenandoahHeuristics::can_unload_classes() {
   if (!ClassUnloading) return false;
   return true;
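
For contrast, a runnable reconstruction of the gating these hunks delete: reference processing used to run only every Nth cycle, controlled by ShenandoahRefProcFrequency (0 meaning never); with concurrent reference processing it becomes part of every marking cycle, so the knob and its 50%-chance aggressive-mode variant have no role left:

    #include <cassert>
    #include <cstddef>

    // Reconstruction of the retired ShenandoahHeuristics logic: references
    // were processed only on every Nth GC cycle.
    bool should_process_references(size_t cycle, size_t ref_proc_frequency) {
      if (ref_proc_frequency == 0) return false;  // processing disabled
      return cycle % ref_proc_frequency == 0;     // every Nth cycle
    }

    int main() {
      assert(!should_process_references(3, 0));  // frequency 0: never
      assert(should_process_references(10, 5));  // every 5th cycle
      assert(!should_process_references(11, 5));
      return 0;
    }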

@@ -120,9 +120,6 @@ public:
   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
 
-  virtual bool can_process_references();
-  virtual bool should_process_references();
-
   virtual bool can_unload_classes();
   virtual bool can_unload_classes_normal();
   virtual bool should_unload_classes();

@@ -36,11 +36,6 @@ bool ShenandoahPassiveHeuristics::should_start_gc() const {
   return false;
 }
 
-bool ShenandoahPassiveHeuristics::should_process_references() {
-  // Always process references, if we can.
-  return can_process_references();
-}
-
 bool ShenandoahPassiveHeuristics::should_unload_classes() {
   // Always unload classes, if we can.
   return can_unload_classes();

@@ -31,8 +31,6 @@ class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
 public:
   virtual bool should_start_gc() const;
 
-  virtual bool should_process_references();
-
   virtual bool should_unload_classes();
 
   virtual bool should_degenerate_cycle();

@@ -68,7 +68,8 @@ void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) {
   msg.append(" " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name());
   msg.append(" %3s allocated after mark start\n", ctx->allocated_after_mark_start(obj) ? "" : "not");
   msg.append(" %3s after update watermark\n", cast_from_oop<HeapWord*>(obj) >= r->get_update_watermark() ? "" : "not");
-  msg.append(" %3s marked \n", ctx->is_marked(obj) ? "" : "not");
+  msg.append(" %3s marked strong\n", ctx->is_marked_strong(obj) ? "" : "not");
+  msg.append(" %3s marked weak\n", ctx->is_marked_weak(obj) ? "" : "not");
   msg.append(" %3s in collection set\n", heap->in_collection_set(obj) ? "" : "not");
   msg.append(" mark:%s\n", mw_ss.as_string());
   msg.append(" region: %s", ss.as_string());
@@ -353,24 +354,6 @@ void ShenandoahAsserts::print_rp_failure(const char *label, BoolObjectClosure* a
   report_vm_error(file, line, msg.buffer());
 }
 
-void ShenandoahAsserts::assert_rp_isalive_not_installed(const char *file, int line) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ReferenceProcessor* rp = heap->ref_processor();
-  if (rp->is_alive_non_header() != NULL) {
-    print_rp_failure("Shenandoah assert_rp_isalive_not_installed failed", rp->is_alive_non_header(),
-                     file, line);
-  }
-}
-
-void ShenandoahAsserts::assert_rp_isalive_installed(const char *file, int line) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ReferenceProcessor* rp = heap->ref_processor();
-  if (rp->is_alive_non_header() == NULL) {
-    print_rp_failure("Shenandoah assert_rp_isalive_installed failed", rp->is_alive_non_header(),
-                     file, line);
-  }
-}
-
 void ShenandoahAsserts::assert_locked_or_shenandoah_safepoint(Mutex* lock, const char* file, int line) {
   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
     return;

@@ -65,9 +65,6 @@ public:
   static void assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line);
   static void assert_not_in_cset_loc(void* interior_loc, const char* file, int line);
 
-  static void assert_rp_isalive_not_installed(const char *file, int line);
-  static void assert_rp_isalive_installed(const char *file, int line);
-
   static void assert_locked_or_shenandoah_safepoint(Mutex* lock, const char* file, int line);
   static void assert_heaplocked(const char* file, int line);

@@ -87,17 +87,6 @@ bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators,
   return is_reference_type(type);
 }
 
-bool ShenandoahBarrierSet::use_load_reference_barrier_weak(DecoratorSet decorators, BasicType type) {
-  assert(need_load_reference_barrier(decorators, type), "Should be subset of LRB");
-  assert(is_reference_type(type), "Why we here?");
-  // Native load reference barrier is only needed for concurrent root processing
-  if (!ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
-    return false;
-  }
-  return ((decorators & IN_NATIVE) != 0) && ((decorators & ON_STRONG_OOP_REF) == 0);
-}
-
 bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
   if (!ShenandoahSATBBarrier) return false;
   // Only needed for references
@@ -109,6 +98,16 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, Basi
   return (on_weak_ref || unknown) && keep_alive;
 }
 
+ShenandoahBarrierSet::AccessKind ShenandoahBarrierSet::access_kind(DecoratorSet decorators, BasicType type) {
+  if ((decorators & IN_NATIVE) != 0) {
+    return AccessKind::NATIVE;
+  } else if ((decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF)) != 0) {
+    return AccessKind::WEAK;
+  } else {
+    return AccessKind::NORMAL;
+  }
+}
+
 void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
   // Create thread local data
   ShenandoahThreadLocalData::create(thread);

@@ -33,8 +33,19 @@
 class ShenandoahBarrierSetAssembler;
 
 class ShenandoahBarrierSet: public BarrierSet {
-private:
+public:
+  enum class AccessKind {
+    // Regular in-heap access on reference fields
+    NORMAL,
+    // Off-heap reference access
+    NATIVE,
+    // In-heap reference access on referent fields of j.l.r.Reference objects
+    WEAK
+  };
+
+private:
   ShenandoahHeap* _heap;
   BufferNode::Allocator _satb_mark_queue_buffer_allocator;
   ShenandoahSATBMarkQueueSet _satb_mark_queue_set;
@@ -53,8 +64,8 @@ public:
   }
 
   static bool need_load_reference_barrier(DecoratorSet decorators, BasicType type);
-  static bool use_load_reference_barrier_weak(DecoratorSet decorators, BasicType type);
   static bool need_keep_alive_barrier(DecoratorSet decorators, BasicType type);
+  static AccessKind access_kind(DecoratorSet decorators, BasicType type);
 
   void print_on(outputStream* st) const;
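
The new access_kind mapping gives IN_NATIVE precedence over reference strength: off-heap roots are NATIVE even when weak, any weak/phantom/unknown in-heap strength is WEAK, everything else is NORMAL. The same precedence as a self-contained sketch; the decorator bit values below are invented for illustration and do not match HotSpot's:

    #include <cassert>
    #include <cstdint>

    // Stand-ins for HotSpot decorator bits; the actual values differ.
    const uint64_t IN_NATIVE          = 1u << 0;
    const uint64_t ON_WEAK_OOP_REF    = 1u << 1;
    const uint64_t ON_PHANTOM_OOP_REF = 1u << 2;
    const uint64_t ON_UNKNOWN_OOP_REF = 1u << 3;

    enum class AccessKind { NORMAL, NATIVE, WEAK };

    // Same precedence as ShenandoahBarrierSet::access_kind in this commit.
    AccessKind access_kind(uint64_t decorators) {
      if ((decorators & IN_NATIVE) != 0) {
        return AccessKind::NATIVE;
      } else if ((decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF)) != 0) {
        return AccessKind::WEAK;
      }
      return AccessKind::NORMAL;
    }

    int main() {
      assert(access_kind(IN_NATIVE | ON_WEAK_OOP_REF) == AccessKind::NATIVE);
      assert(access_kind(ON_PHANTOM_OOP_REF) == AccessKind::WEAK);
      assert(access_kind(0) == AccessKind::NORMAL);
      return 0;
    }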

@@ -198,7 +198,7 @@ template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
   oop value = Raw::oop_load_in_heap(addr);
   ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
-  value = bs->load_reference_barrier(value);
+  value = bs->load_reference_barrier<decorators, T>(value, addr);
   bs->keep_alive_if_weak<decorators>(value);
   return value;
 }
@@ -207,9 +207,9 @@ template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
   oop value = Raw::oop_load_in_heap_at(base, offset);
   ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
-  value = bs->load_reference_barrier(value);
-  bs->keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset),
-                         value);
+  DecoratorSet resolved_decorators = AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
+  value = bs->load_reference_barrier<decorators>(value, AccessInternal::oop_field_addr<decorators>(base, offset));
+  bs->keep_alive_if_weak(resolved_decorators, value);
   return value;
 }
@@ -217,6 +217,7 @@ template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
   shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
+  shenandoah_assert_not_in_cset_if(addr, value, value != NULL && !ShenandoahHeap::heap()->cancelled_gc());
   ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
   bs->storeval_barrier(value);
   bs->satb_barrier<decorators>(addr);
@@ -339,7 +340,7 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
       oop witness = ShenandoahHeap::cas_oop(fwd, elem_ptr, o);
       obj = fwd;
     }
-    if (ENQUEUE && !ctx->is_marked(obj)) {
+    if (ENQUEUE && !ctx->is_marked_strong(obj)) {
       queue.enqueue_known_active(obj);
     }
   }

View file

@ -31,8 +31,6 @@
#include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp" #include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
@ -40,6 +38,7 @@
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp" #include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
@ -61,7 +60,7 @@ private:
template <class T> template <class T>
inline void do_oop_work(T* p) { inline void do_oop_work(T* p) {
ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context); ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context, false);
} }
public: public:
@ -74,11 +73,12 @@ public:
void do_oop(oop* p) { do_oop_work(p); } void do_oop(oop* p) { do_oop_work(p); }
}; };
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
MetadataVisitingOopIterateClosure(rp), MetadataVisitingOopIterateClosure(rp),
_queue(q), _queue(q),
_heap(ShenandoahHeap::heap()), _heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()) _mark_context(_heap->marking_context()),
_weak(false)
{ } { }
template<UpdateRefsMode UPDATE_REFS> template<UpdateRefsMode UPDATE_REFS>
@ -153,14 +153,8 @@ public:
ShenandoahConcurrentWorkerSession worker_session(worker_id); ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
ReferenceProcessor* rp; ShenandoahReferenceProcessor* rp = heap->ref_processor();
if (heap->process_references()) { assert(rp != NULL, "need reference processor");
rp = heap->ref_processor();
shenandoah_assert_rp_isalive_installed();
} else {
rp = NULL;
}
_cm->mark_loop(worker_id, _terminator, rp, _cm->mark_loop(worker_id, _terminator, rp,
true, // cancellable true, // cancellable
ShenandoahStringDedup::is_enabled()); // perform string dedup ShenandoahStringDedup::is_enabled()); // perform string dedup
@@ -206,7 +200,7 @@ class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
private: private:
ShenandoahConcurrentRootScanner<false /* concurrent */> _rs; ShenandoahConcurrentRootScanner<false /* concurrent */> _rs;
ShenandoahConcurrentMark* const _cm; ShenandoahConcurrentMark* const _cm;
ReferenceProcessor* _rp; ShenandoahReferenceProcessor* _rp;
public: public:
ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm, ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
@@ -222,12 +216,7 @@ ShenandoahProcessConcurrentRootsTask<T>::ShenandoahProcessConcurrentRootsTask(Sh
AbstractGangTask("Shenandoah Process Concurrent Roots"), AbstractGangTask("Shenandoah Process Concurrent Roots"),
_rs(nworkers, phase), _rs(nworkers, phase),
_cm(cm), _cm(cm),
_rp(NULL) { _rp(ShenandoahHeap::heap()->ref_processor()) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (heap->process_references()) {
_rp = heap->ref_processor();
shenandoah_assert_rp_isalive_installed();
}
} }
template <typename T> template <typename T>
@@ -253,13 +242,7 @@ public:
ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahParallelWorkerSession worker_session(worker_id); ShenandoahParallelWorkerSession worker_session(worker_id);
ReferenceProcessor* rp; ShenandoahReferenceProcessor* rp = heap->ref_processor();
if (heap->process_references()) {
rp = heap->ref_processor();
shenandoah_assert_rp_isalive_installed();
} else {
rp = NULL;
}
// First drain remaining SATB buffers. // First drain remaining SATB buffers.
// Notice that this is not strictly necessary for mark-compact. But since // Notice that this is not strictly necessary for mark-compact. But since
@@ -303,9 +286,12 @@ void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_pha
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahGCPhase phase(root_phase); ShenandoahGCPhase phase(root_phase);
ShenandoahReferenceProcessor* ref_processor = heap->ref_processor();
ref_processor->reset_thread_locals();
ref_processor->set_soft_reference_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
WorkGang* workers = heap->workers(); WorkGang* workers = heap->workers();
uint nworkers = workers->active_workers(); uint nworkers = workers->active_workers();
@@ -410,18 +396,18 @@ private:
SuspendibleThreadSetJoiner _sts_joiner; SuspendibleThreadSetJoiner _sts_joiner;
ShenandoahConcurrentRootScanner<true /* concurrent */> _rs; ShenandoahConcurrentRootScanner<true /* concurrent */> _rs;
ShenandoahObjToScanQueueSet* const _queue_set; ShenandoahObjToScanQueueSet* const _queue_set;
ReferenceProcessor* const _rp; ShenandoahReferenceProcessor* const _rp;
public: public:
ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs, ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
ReferenceProcessor* rp, ShenandoahReferenceProcessor* rp,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::Phase phase,
uint nworkers); uint nworkers);
void work(uint worker_id); void work(uint worker_id);
}; };
ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs, ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
ReferenceProcessor* rp, ShenandoahReferenceProcessor* rp,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::Phase phase,
uint nworkers) : uint nworkers) :
AbstractGangTask("Shenandoah Concurrent Mark Roots"), AbstractGangTask("Shenandoah Concurrent Mark Roots"),
@@ -442,19 +428,7 @@ void ShenandoahConcurrentMark::mark_from_roots() {
WorkGang* workers = _heap->workers(); WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers(); uint nworkers = workers->active_workers();
ReferenceProcessor* rp = NULL; ShenandoahReferenceProcessor* rp = _heap->ref_processor();
if (_heap->process_references()) {
rp = _heap->ref_processor();
rp->set_active_mt_degree(nworkers);
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_no_refs*/);
rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
}
shenandoah_assert_rp_isalive_not_installed();
ShenandoahIsAliveSelector is_alive;
ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
task_queues()->reserve(nworkers); task_queues()->reserve(nworkers);
@@ -480,10 +454,6 @@ void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
uint nworkers = _heap->workers()->active_workers(); uint nworkers = _heap->workers()->active_workers();
{ {
shenandoah_assert_rp_isalive_not_installed();
ShenandoahIsAliveSelector is_alive;
ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
// Full GC does not execute concurrent cycle. Degenerated cycle may bypass concurrent cycle. // Full GC does not execute concurrent cycle. Degenerated cycle may bypass concurrent cycle.
// In those cases, concurrent roots might not be scanned, scan them here. Ideally, this // In those cases, concurrent roots might not be scanned, scan them here. Ideally, this
// should piggyback to ShenandoahFinalMarkingTask, but it makes time tracking very hard. // should piggyback to ShenandoahFinalMarkingTask, but it makes time tracking very hard.
@@ -524,343 +494,11 @@ void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
assert(task_queues()->is_empty(), "Should be empty"); assert(task_queues()->is_empty(), "Should be empty");
} }
// When we're done marking everything, we process weak references.
if (_heap->process_references()) {
weak_refs_work(full_gc);
}
assert(task_queues()->is_empty(), "Should be empty"); assert(task_queues()->is_empty(), "Should be empty");
TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats()); TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
} }
// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
uint _worker_id;
TaskTerminator* _terminator;
bool _reset_terminator;
public:
ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
_worker_id(worker_id),
_terminator(t),
_reset_terminator(reset_terminator) {
}
void do_void() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* sh = ShenandoahHeap::heap();
ShenandoahConcurrentMark* scm = sh->concurrent_mark();
assert(sh->process_references(), "why else would we be here?");
ReferenceProcessor* rp = sh->ref_processor();
shenandoah_assert_rp_isalive_installed();
scm->mark_loop(_worker_id, _terminator, rp,
false, // not cancellable
false); // do not do strdedup
if (_reset_terminator) {
_terminator->reset_for_reuse();
}
}
};
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
ShenandoahObjToScanQueue* _queue;
ShenandoahHeap* _heap;
ShenandoahMarkingContext* const _mark_context;
template <class T>
inline void do_oop_work(T* p) {
ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
}
public:
ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
ShenandoahObjToScanQueue* _queue;
ShenandoahHeap* _heap;
ShenandoahMarkingContext* const _mark_context;
template <class T>
inline void do_oop_work(T* p) {
ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
}
public:
ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
ShenandoahHeap* const _heap;
template <class T>
inline void do_oop_work(T* p) {
oop o = _heap->maybe_update_with_forwarded(p);
shenandoah_assert_marked_except(p, o, o == NULL);
}
public:
ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
TaskTerminator* _terminator;
public:
ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
TaskTerminator* t) :
AbstractGangTask("Shenandoah Process Weak References"),
_proc_task(proc_task),
_terminator(t) {
}
void work(uint worker_id) {
Thread* current_thread = Thread::current();
ResourceMark rm(current_thread);
HandleMark hm(current_thread);
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
if (heap->has_forwarded_objects()) {
ShenandoahForwardedIsAliveClosure is_alive;
ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
_proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
} else {
ShenandoahIsAliveClosure is_alive;
ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
_proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
}
}
};
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
WorkGang* _workers;
public:
ShenandoahRefProcTaskExecutor(WorkGang* workers) :
_workers(workers) {
}
// Executes a task using worker threads.
void execute(ProcessTask& task, uint ergo_workers) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahConcurrentMark* cm = heap->concurrent_mark();
ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
ergo_workers,
/* do_check = */ false);
uint nworkers = _workers->active_workers();
cm->task_queues()->reserve(nworkers);
TaskTerminator terminator(nworkers, cm->task_queues());
ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
_workers->run_task(&proc_task_proxy);
}
};
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
assert(_heap->process_references(), "sanity");
ShenandoahPhaseTimings::Phase phase_root =
full_gc ?
ShenandoahPhaseTimings::full_gc_weakrefs :
ShenandoahPhaseTimings::weakrefs;
ShenandoahGCPhase phase(phase_root);
ReferenceProcessor* rp = _heap->ref_processor();
// NOTE: We cannot shortcut on has_discovered_references() here, because
// we will miss marking JNI Weak refs then, see implementation in
// ReferenceProcessor::process_discovered_references.
weak_refs_work_doit(full_gc);
rp->verify_no_references_recorded();
assert(!rp->discovery_enabled(), "Post condition");
}
void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
ReferenceProcessor* rp = _heap->ref_processor();
ShenandoahPhaseTimings::Phase phase_process =
full_gc ?
ShenandoahPhaseTimings::full_gc_weakrefs_process :
ShenandoahPhaseTimings::weakrefs_process;
shenandoah_assert_rp_isalive_not_installed();
ShenandoahIsAliveSelector is_alive;
ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
rp->set_active_mt_degree(nworkers);
assert(task_queues()->is_empty(), "Should be empty");
// complete_gc and keep_alive closures instantiated here are only needed for
// single-threaded path in RP. They share the queue 0 for tracking work, which
// simplifies implementation. Since RP may decide to call complete_gc several
// times, we need to be able to reuse the terminator.
uint serial_worker_id = 0;
TaskTerminator terminator(1, task_queues());
ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
ShenandoahRefProcTaskExecutor executor(workers);
ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
{
// Note: Don't emit JFR event for this phase, to avoid overflow nesting phase level.
// Reference Processor emits 2 levels JFR event, that can get us over the JFR
// event nesting level limits, in case of degenerated GC gets upgraded to
// full GC.
ShenandoahTimingsTracker phase_timing(phase_process);
if (_heap->has_forwarded_objects()) {
ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
const ReferenceProcessorStats& stats =
rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
&complete_gc, &executor,
&pt);
_heap->tracer()->report_gc_reference_stats(stats);
} else {
ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
const ReferenceProcessorStats& stats =
rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
&complete_gc, &executor,
&pt);
_heap->tracer()->report_gc_reference_stats(stats);
}
pt.print_all_references();
assert(task_queues()->is_empty(), "Should be empty");
}
}
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
ShenandoahHeap* const _heap;
public:
ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
virtual bool should_return() { return _heap->cancelled_gc(); }
};
class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
void do_void() {
ShenandoahHeap* sh = ShenandoahHeap::heap();
ShenandoahConcurrentMark* scm = sh->concurrent_mark();
assert(sh->process_references(), "why else would we be here?");
TaskTerminator terminator(1, scm->task_queues());
ReferenceProcessor* rp = sh->ref_processor();
shenandoah_assert_rp_isalive_installed();
scm->mark_loop(0, &terminator, rp,
false, // not cancellable
false); // do not do strdedup
}
};
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
ReferenceProcessor* _rp;
public:
ShenandoahPrecleanTask(ReferenceProcessor* rp) :
AbstractGangTask("Shenandoah Precleaning"),
_rp(rp) {}
void work(uint worker_id) {
assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeap* sh = ShenandoahHeap::heap();
assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");
ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);
ShenandoahCancelledGCYieldClosure yield;
ShenandoahPrecleanCompleteGCClosure complete_gc;
ShenandoahIsAliveClosure is_alive;
ShenandoahCMKeepAliveClosure keep_alive(q);
ResourceMark rm;
_rp->preclean_discovered_references(&is_alive, &keep_alive,
&complete_gc, &yield,
NULL);
}
};
void ShenandoahConcurrentMark::preclean_weak_refs() {
// Pre-cleaning weak references before diving into STW makes sense at the
// end of concurrent mark. This will filter out the references which referents
// are alive. Note that ReferenceProcessor already filters out these on reference
// discovery, and the bulk of work is done here. This phase processes leftovers
// that missed the initial filtering, i.e. when referent was marked alive after
// reference was discovered by RP.
assert(_heap->process_references(), "sanity");
// Shortcut if no references were discovered to avoid winding up threads.
ReferenceProcessor* rp = _heap->ref_processor();
if (!rp->has_discovered_references()) {
return;
}
assert(task_queues()->is_empty(), "Should be empty");
ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
shenandoah_assert_rp_isalive_not_installed();
ShenandoahIsAliveSelector is_alive;
ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
// Execute precleaning in the worker thread: it will give us GCLABs, String dedup
// queues and other goodies. When upstream ReferenceProcessor starts supporting
// parallel precleans, we can extend this to more threads.
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
assert(nworkers == 1, "This code uses only a single worker");
task_queues()->reserve(nworkers);
ShenandoahPrecleanTask task(rp);
workers->run_task(&task);
assert(task_queues()->is_empty(), "Should be empty");
}
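All of the machinery deleted above (the STW drain and keep-alive closures, the AbstractRefProcTaskExecutor glue, weak_refs_work() and precleaning) is superseded by ShenandoahReferenceProcessor. Condensed from the call sites added elsewhere in this commit, its per-cycle choreography is roughly the following sketch (ordering simplified, error paths omitted):

// Sketch assembled from this commit's call sites; not one real function.
ShenandoahReferenceProcessor* rp = heap->ref_processor();

// At mark-roots time:
rp->reset_thread_locals();
rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

// Each marking worker registers its mark closure before draining queues:
rp->set_mark_closure(worker_id, cl);

// After final mark, the workers process references; concurrent cycles
// pass true, the degenerated path passes false:
rp->process_references(heap->workers(), true /* concurrent */);

// A cancelled cycle abandons whatever discovery happened so far:
rp->abandon_partial_discovery();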
void ShenandoahConcurrentMark::cancel() { void ShenandoahConcurrentMark::cancel() {
// Clean up marking stacks. // Clean up marking stacks.
ShenandoahObjToScanQueueSet* queues = task_queues(); ShenandoahObjToScanQueueSet* queues = task_queues();
@@ -876,7 +514,7 @@ ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
} }
template <bool CANCELLABLE> template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp, void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor* rp,
bool strdedup) { bool strdedup) {
ShenandoahObjToScanQueue* q = get_queue(w); ShenandoahObjToScanQueue* q = get_queue(w);
@@ -934,6 +572,8 @@ void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_da
ShenandoahObjToScanQueue* q; ShenandoahObjToScanQueue* q;
ShenandoahMarkTask t; ShenandoahMarkTask t;
_heap->ref_processor()->set_mark_closure(worker_id, cl);
/* /*
* Process outstanding queues, if any. * Process outstanding queues, if any.
* *


@@ -32,6 +32,7 @@
#include "gc/shenandoah/shenandoahTaskqueue.hpp" #include "gc/shenandoah/shenandoahTaskqueue.hpp"
class ShenandoahStrDedupQueue; class ShenandoahStrDedupQueue;
class ShenandoahReferenceProcessor;
class ShenandoahConcurrentMark: public CHeapObj<mtGC> { class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
private: private:
@@ -49,10 +50,10 @@ private:
inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task); inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task);
template <class T> template <class T>
inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array); inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array, bool weak);
template <class T> template <class T>
inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow); inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow, bool weak);
inline void count_liveness(ShenandoahLiveData* live_data, oop obj); inline void count_liveness(ShenandoahLiveData* live_data, oop obj);
@@ -60,10 +61,10 @@ private:
void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t); void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t);
template <bool CANCELLABLE> template <bool CANCELLABLE>
void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup); void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor* rp, bool strdedup);
public: public:
void mark_loop(uint worker_id, TaskTerminator* terminator, ReferenceProcessor *rp, void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor* rp,
bool cancellable, bool strdedup) { bool cancellable, bool strdedup) {
if (cancellable) { if (cancellable) {
mark_loop_prework<true>(worker_id, terminator, rp, strdedup); mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
@@ -73,7 +74,7 @@ public:
} }
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP> template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context); static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak);
void mark_from_roots(); void mark_from_roots();
void finish_mark_from_roots(bool full_gc); void finish_mark_from_roots(bool full_gc);
@@ -82,15 +83,6 @@ public:
void update_roots(ShenandoahPhaseTimings::Phase root_phase); void update_roots(ShenandoahPhaseTimings::Phase root_phase);
void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase); void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase);
// ---------- Weak references
//
private:
void weak_refs_work(bool full_gc);
void weak_refs_work_doit(bool full_gc);
public:
void preclean_weak_refs();
// ---------- Helpers // ---------- Helpers
// Used from closures, need to be public // Used from closures, need to be public
// //


@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,10 @@ void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, Shena
shenandoah_assert_marked(NULL, obj); shenandoah_assert_marked(NULL, obj);
shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc()); shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());
// Are we in weak subgraph scan?
bool weak = task->is_weak();
cl->set_weak(weak);
if (task->is_not_chunked()) { if (task->is_not_chunked()) {
if (obj->is_instance()) { if (obj->is_instance()) {
// Case 1: Normal oop, process as usual. // Case 1: Normal oop, process as usual.
@@ -52,7 +56,7 @@ void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, Shena
} else if (obj->is_objArray()) { } else if (obj->is_objArray()) {
// Case 2: Object array instance and no chunk is set. Must be the first // Case 2: Object array instance and no chunk is set. Must be the first
// time we visit it, start the chunked processing. // time we visit it, start the chunked processing.
do_chunked_array_start<T>(q, cl, obj); do_chunked_array_start<T>(q, cl, obj, weak);
} else { } else {
// Case 3: Primitive array. Do nothing, no oops there. We use the same // Case 3: Primitive array. Do nothing, no oops there. We use the same
// performance tweak TypeArrayKlass::oop_oop_iterate_impl is using: // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
@@ -61,10 +65,14 @@ void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, Shena
assert (obj->is_typeArray(), "should be type array"); assert (obj->is_typeArray(), "should be type array");
} }
// Count liveness the last: push the outstanding work to the queues first // Count liveness the last: push the outstanding work to the queues first
// Avoid double-counting objects that are visited twice due to upgrade
// from weak to strong mark.
if (task->count_liveness()) {
count_liveness(live_data, obj); count_liveness(live_data, obj);
}
} else { } else {
// Case 4: Array chunk, has sensible chunk id. Process it. // Case 4: Array chunk, has sensible chunk id. Process it.
do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow()); do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
} }
} }
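The mark task now carries two flags next to the chunking information. Their meaning, inferred from the call sites and guards above (the exact ShenandoahMarkTask layout is not part of this excerpt):

// Constructor shapes as used above; 'weak' and 'skip_live' are the new flags.
//   ShenandoahMarkTask(oop obj, bool skip_live, bool weak);
//   ShenandoahMarkTask(objArrayOop array, bool skip_live, bool weak, int chunk, int pow);
//
// weak:      the object was reached through the weak subgraph (for example via
//            a referent); its fields receive weak marks until upgraded.
// skip_live: the object was already live-counted on an earlier (weak) visit,
//            so count_liveness() must not run again; do_task() honors this
//            through task->count_liveness().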
@@ -98,7 +106,7 @@ inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_da
} }
template <class T> template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) { inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
assert(obj->is_objArray(), "expect object array"); assert(obj->is_objArray(), "expect object array");
objArrayOop array = objArrayOop(obj); objArrayOop array = objArrayOop(obj);
int len = array->length(); int len = array->length();
@@ -129,7 +137,7 @@ inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScan
pow--; pow--;
chunk = 2; chunk = 2;
last_idx = (1 << pow); last_idx = (1 << pow);
bool pushed = q->push(ShenandoahMarkTask(array, 1, pow)); bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
assert(pushed, "overflow queue should always succeed pushing"); assert(pushed, "overflow queue should always succeed pushing");
} }
@@ -142,7 +150,7 @@ inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScan
int right_chunk = chunk*2; int right_chunk = chunk*2;
int left_chunk_end = left_chunk * (1 << pow); int left_chunk_end = left_chunk * (1 << pow);
if (left_chunk_end < len) { if (left_chunk_end < len) {
bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow)); bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
assert(pushed, "overflow queue should always succeed pushing"); assert(pushed, "overflow queue should always succeed pushing");
chunk = right_chunk; chunk = right_chunk;
last_idx = left_chunk_end; last_idx = left_chunk_end;
@@ -160,7 +168,7 @@ inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScan
} }
template <class T> template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) { inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
assert(obj->is_objArray(), "expect object array"); assert(obj->is_objArray(), "expect object array");
objArrayOop array = objArrayOop(obj); objArrayOop array = objArrayOop(obj);
@@ -171,7 +179,7 @@ inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue*
while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) { while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
pow--; pow--;
chunk *= 2; chunk *= 2;
bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow)); bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
assert(pushed, "overflow queue should always succeed pushing"); assert(pushed, "overflow queue should always succeed pushing");
} }
@@ -215,13 +223,13 @@ public:
void do_buffer_impl(void **buffer, size_t size) { void do_buffer_impl(void **buffer, size_t size) {
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
oop *p = (oop *) &buffer[i]; oop *p = (oop *) &buffer[i];
ShenandoahConcurrentMark::mark_through_ref<oop, NONE, STRING_DEDUP>(p, _heap, _queue, _mark_context); ShenandoahConcurrentMark::mark_through_ref<oop, NONE, STRING_DEDUP>(p, _heap, _queue, _mark_context, false);
} }
} }
}; };
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP> template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context) { inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
T o = RawAccess<>::oop_load(p); T o = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(o)) { if (!CompressedOops::is_null(o)) {
oop obj = CompressedOops::decode_not_null(o); oop obj = CompressedOops::decode_not_null(o);
@@ -252,8 +260,15 @@ inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* hea
shenandoah_assert_not_forwarded(p, obj); shenandoah_assert_not_forwarded(p, obj);
shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc()); shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
if (mark_context->mark(obj)) { bool skip_live = false;
bool pushed = q->push(ShenandoahMarkTask(obj)); bool marked;
if (weak) {
marked = mark_context->mark_weak(obj);
} else {
marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
}
if (marked) {
bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
assert(pushed, "overflow queue should always succeed pushing"); assert(pushed, "overflow queue should always succeed pushing");
if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) { if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) {
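A compact truth table for the marking decision above, derived from this code and the mark-bitmap semantics declared later in this commit:

// weak  | prior mark  | call and result                  | task pushed
// ------+-------------+----------------------------------+-----------------------------------
// true  | none        | mark_weak -> true                | (obj, skip_live=false, weak=true)
// true  | weak/strong | mark_weak -> false               | none
// false | none        | mark_strong, was_upgraded=false  | (obj, skip_live=false, weak=false)
// false | weak        | mark_strong, was_upgraded=true   | (obj, skip_live=true,  weak=false)
// false | strong      | mark_strong -> false             | none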


@@ -141,7 +141,6 @@ void ShenandoahControlThread::run_service() {
policy->record_explicit_to_concurrent(); policy->record_explicit_to_concurrent();
mode = default_mode; mode = default_mode;
// Unload and clean up everything // Unload and clean up everything
heap->set_process_references(heuristics->can_process_references());
heap->set_unload_classes(heuristics->can_unload_classes()); heap->set_unload_classes(heuristics->can_unload_classes());
} else { } else {
policy->record_explicit_to_full(); policy->record_explicit_to_full();
@@ -158,7 +157,6 @@ void ShenandoahControlThread::run_service() {
mode = default_mode; mode = default_mode;
// Unload and clean up everything // Unload and clean up everything
heap->set_process_references(heuristics->can_process_references());
heap->set_unload_classes(heuristics->can_unload_classes()); heap->set_unload_classes(heuristics->can_unload_classes());
} else { } else {
policy->record_implicit_to_full(); policy->record_implicit_to_full();
@@ -172,7 +170,6 @@ void ShenandoahControlThread::run_service() {
} }
// Ask policy if this cycle wants to process references or unload classes // Ask policy if this cycle wants to process references or unload classes
heap->set_process_references(heuristics->should_process_references());
heap->set_unload_classes(heuristics->should_unload_classes()); heap->set_unload_classes(heuristics->should_unload_classes());
} }
@@ -404,14 +401,12 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
heap->entry_mark(); heap->entry_mark();
if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return; if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;
// If not cancelled, can try to concurrently pre-clean
heap->entry_preclean();
// Complete marking under STW, and start evacuation // Complete marking under STW, and start evacuation
heap->vmop_entry_final_mark(); heap->vmop_entry_final_mark();
// Process weak roots that might still point to regions that would be broken by cleanup // Process weak roots that might still point to regions that would be broken by cleanup
if (heap->is_concurrent_weak_root_in_progress()) { if (heap->is_concurrent_weak_root_in_progress()) {
heap->entry_weak_refs();
heap->entry_weak_roots(); heap->entry_weak_roots();
} }
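With precleaning gone, the control thread's cycle takes the shape sketched below (the entry_* names are the real methods touched in this hunk; surrounding checks and degeneration handling are condensed away):

// Condensed cycle shape after this change; cancellation checks omitted.
heap->entry_mark();                       // concurrent marking
heap->vmop_entry_final_mark();            // complete marking under STW
if (heap->is_concurrent_weak_root_in_progress()) {
  heap->entry_weak_refs();                // new: concurrent weak reference processing
  heap->entry_weak_roots();               // then concurrent weak-root processing
}
// ... evacuation and update-refs follow as before.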


@@ -55,6 +55,7 @@
#include "gc/shenandoah/shenandoahPacer.inline.hpp" #include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp" #include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp" #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp" #include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp" #include "gc/shenandoah/shenandoahTaskqueue.hpp"
@@ -206,10 +207,10 @@ jint ShenandoahHeap::initialize() {
// Reserve and commit memory for bitmap(s) // Reserve and commit memory for bitmap(s)
// //
_bitmap_size = MarkBitMap::compute_size(heap_rs.size()); _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
_bitmap_size = align_up(_bitmap_size, bitmap_page_size); _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
guarantee(bitmap_bytes_per_region != 0, guarantee(bitmap_bytes_per_region != 0,
"Bitmap bytes per region should not be zero"); "Bitmap bytes per region should not be zero");
@@ -393,9 +394,6 @@ jint ShenandoahHeap::initialize() {
_control_thread = new ShenandoahControlThread(); _control_thread = new ShenandoahControlThread();
_ref_proc_mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
_ref_proc_mt_discovery = _max_workers > 1;
ShenandoahInitLogger::print(); ShenandoahInitLogger::print();
return JNI_OK; return JNI_OK;
@@ -475,7 +473,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_soft_ref_policy(), _soft_ref_policy(),
_log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes), _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
_ref_processor(NULL), _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
_marking_context(NULL), _marking_context(NULL),
_bitmap_size(0), _bitmap_size(0),
_bitmap_regions_per_slice(0), _bitmap_regions_per_slice(0),
@@ -615,8 +613,6 @@ void ShenandoahHeap::post_initialize() {
_scm->initialize(_max_workers); _scm->initialize(_max_workers);
_full_gc->initialize(_gc_timer); _full_gc->initialize(_gc_timer);
ref_processing_init();
_heuristics->initialize(); _heuristics->initialize();
JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers()); JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
@@ -1791,13 +1787,9 @@ void ShenandoahHeap::op_final_mark() {
concurrent_mark()->cancel(); concurrent_mark()->cancel();
set_concurrent_mark_in_progress(false); set_concurrent_mark_in_progress(false);
if (process_references()) {
// Abandon reference processing right away: pre-cleaning must have failed. // Abandon reference processing right away: pre-cleaning must have failed.
ReferenceProcessor *rp = ref_processor(); ShenandoahReferenceProcessor* rp = ref_processor();
rp->disable_discovery();
rp->abandon_partial_discovery(); rp->abandon_partial_discovery();
rp->verify_no_references_recorded();
}
} }
} }
@@ -2004,6 +1996,15 @@ public:
} }
}; };
void ShenandoahHeap::op_weak_refs() {
// Concurrent weak refs processing
{
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_refs_work);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs_work);
ref_processor()->process_references(workers(), true /* concurrent */);
}
}
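The degenerated path further below reuses the same entry point with concurrent = false, so a cycle that degenerates still processes its discovered references, just inside the pause rather than concurrently.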
void ShenandoahHeap::op_weak_roots() { void ShenandoahHeap::op_weak_roots() {
if (is_concurrent_weak_root_in_progress()) { if (is_concurrent_weak_root_in_progress()) {
// Concurrent weak root processing // Concurrent weak root processing
@@ -2077,13 +2078,6 @@ void ShenandoahHeap::op_reset() {
parallel_heap_region_iterate(&cl); parallel_heap_region_iterate(&cl);
} }
void ShenandoahHeap::op_preclean() {
if (ShenandoahPacing) {
pacer()->setup_for_preclean();
}
concurrent_mark()->preclean_weak_refs();
}
void ShenandoahHeap::op_full(GCCause::Cause cause) { void ShenandoahHeap::op_full(GCCause::Cause cause) {
ShenandoahMetricsSnapshot metrics; ShenandoahMetricsSnapshot metrics;
metrics.snap_before(); metrics.snap_before();
@@ -2125,7 +2119,6 @@ void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
// //
// Note that we can only do this for "outside-cycle" degens, otherwise we would risk // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
// changing the cycle parameters mid-cycle during concurrent -> degenerated handover. // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
set_process_references(heuristics()->can_process_references());
set_unload_classes(heuristics()->can_unload_classes()); set_unload_classes(heuristics()->can_unload_classes());
op_reset(); op_reset();
@@ -2150,6 +2143,12 @@ void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
ShenandoahCodeRoots::disarm_nmethods(); ShenandoahCodeRoots::disarm_nmethods();
} }
{
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_refs_work);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs_work);
ref_processor()->process_references(workers(), false /* concurrent */);
}
op_cleanup_early(); op_cleanup_early();
case _degenerated_evac: case _degenerated_evac:
@@ -2310,22 +2309,6 @@ void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool in_progress) {
} }
} }
void ShenandoahHeap::ref_processing_init() {
assert(_max_workers > 0, "Sanity");
_ref_processor =
new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
_ref_proc_mt_processing, // MT processing
_max_workers, // Degree of MT processing
_ref_proc_mt_discovery, // MT discovery
_max_workers, // Degree of MT discovery
false, // Reference discovery is not atomic
NULL, // No closure, should be installed before use
true); // Scale worker threads
shenandoah_assert_rp_isalive_not_installed();
}
GCTracer* ShenandoahHeap::tracer() { GCTracer* ShenandoahHeap::tracer() {
return shenandoah_policy()->tracer(); return shenandoah_policy()->tracer();
} }
@@ -2461,18 +2444,10 @@ void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
set_gc_state_mask(HAS_FORWARDED, cond); set_gc_state_mask(HAS_FORWARDED, cond);
} }
void ShenandoahHeap::set_process_references(bool pr) {
_process_references.set_cond(pr);
}
void ShenandoahHeap::set_unload_classes(bool uc) { void ShenandoahHeap::set_unload_classes(bool uc) {
_unload_classes.set_cond(uc); _unload_classes.set_cond(uc);
} }
bool ShenandoahHeap::process_references() const {
return _process_references.is_set();
}
bool ShenandoahHeap::unload_classes() const { bool ShenandoahHeap::unload_classes() const {
return _unload_classes.is_set(); return _unload_classes.is_set();
} }
@@ -3067,6 +3042,19 @@ void ShenandoahHeap::entry_updaterefs() {
op_updaterefs(); op_updaterefs();
} }
void ShenandoahHeap::entry_weak_refs() {
static const char* msg = "Concurrent weak references";
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
EventMark em("%s", msg);
ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
"concurrent weak references");
try_inject_alloc_failure();
op_weak_refs();
}
void ShenandoahHeap::entry_weak_roots() { void ShenandoahHeap::entry_weak_roots() {
static const char* msg = "Concurrent weak roots"; static const char* msg = "Concurrent weak roots";
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots); ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
@@ -3153,22 +3141,6 @@ void ShenandoahHeap::entry_reset() {
op_reset(); op_reset();
} }
void ShenandoahHeap::entry_preclean() {
if (ShenandoahPreclean && process_references()) {
static const char* msg = "Concurrent precleaning";
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_preclean);
EventMark em("%s", msg);
ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
"concurrent preclean",
/* check_workers = */ false);
try_inject_alloc_failure();
op_preclean();
}
}
void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) { void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
static const char *msg = "Concurrent uncommit"; static const char *msg = "Concurrent uncommit";
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */); ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
@@ -3245,14 +3217,9 @@ void ShenandoahHeap::deduplicate_string(oop str) {
const char* ShenandoahHeap::init_mark_event_message() const { const char* ShenandoahHeap::init_mark_event_message() const {
assert(!has_forwarded_objects(), "Should not have forwarded objects here"); assert(!has_forwarded_objects(), "Should not have forwarded objects here");
bool proc_refs = process_references();
bool unload_cls = unload_classes(); bool unload_cls = unload_classes();
if (proc_refs && unload_cls) { if (unload_cls) {
return "Pause Init Mark (process weakrefs) (unload classes)";
} else if (proc_refs) {
return "Pause Init Mark (process weakrefs)";
} else if (unload_cls) {
return "Pause Init Mark (unload classes)"; return "Pause Init Mark (unload classes)";
} else { } else {
return "Pause Init Mark"; return "Pause Init Mark";
@@ -3262,14 +3229,9 @@ const char* ShenandoahHeap::init_mark_event_message() const {
const char* ShenandoahHeap::final_mark_event_message() const { const char* ShenandoahHeap::final_mark_event_message() const {
assert(!has_forwarded_objects(), "Should not have forwarded objects here"); assert(!has_forwarded_objects(), "Should not have forwarded objects here");
bool proc_refs = process_references();
bool unload_cls = unload_classes(); bool unload_cls = unload_classes();
if (proc_refs && unload_cls) { if (unload_cls) {
return "Pause Final Mark (process weakrefs) (unload classes)";
} else if (proc_refs) {
return "Pause Final Mark (process weakrefs)";
} else if (unload_cls) {
return "Pause Final Mark (unload classes)"; return "Pause Final Mark (unload classes)";
} else { } else {
return "Pause Final Mark"; return "Pause Final Mark";
@@ -3279,14 +3241,9 @@ const char* ShenandoahHeap::final_mark_event_message() const {
const char* ShenandoahHeap::conc_mark_event_message() const { const char* ShenandoahHeap::conc_mark_event_message() const {
assert(!has_forwarded_objects(), "Should not have forwarded objects here"); assert(!has_forwarded_objects(), "Should not have forwarded objects here");
bool proc_refs = process_references();
bool unload_cls = unload_classes(); bool unload_cls = unload_classes();
if (proc_refs && unload_cls) { if (unload_cls) {
return "Concurrent marking (process weakrefs) (unload classes)";
} else if (proc_refs) {
return "Concurrent marking (process weakrefs)";
} else if (unload_cls) {
return "Concurrent marking (unload classes)"; return "Concurrent marking (unload classes)";
} else { } else {
return "Concurrent marking"; return "Concurrent marking";


@@ -42,7 +42,6 @@
class ConcurrentGCTimer; class ConcurrentGCTimer;
class ObjectIterateScanRootClosure; class ObjectIterateScanRootClosure;
class ReferenceProcessor;
class ShenandoahCollectorPolicy; class ShenandoahCollectorPolicy;
class ShenandoahControlThread; class ShenandoahControlThread;
class ShenandoahGCSession; class ShenandoahGCSession;
@@ -60,7 +59,7 @@ class ShenandoahFreeSet;
class ShenandoahConcurrentMark; class ShenandoahConcurrentMark;
class ShenandoahMarkCompact; class ShenandoahMarkCompact;
class ShenandoahMonitoringSupport; class ShenandoahMonitoringSupport;
class ShenandoahObjToScanQueueSet; class ShenandoahReferenceProcessor;
class ShenandoahPacer; class ShenandoahPacer;
class ShenandoahVerifier; class ShenandoahVerifier;
class ShenandoahWorkGang; class ShenandoahWorkGang;
@@ -390,7 +389,7 @@ public:
// for concurrent operation. // for concurrent operation.
void entry_reset(); void entry_reset();
void entry_mark(); void entry_mark();
void entry_preclean(); void entry_weak_refs();
void entry_weak_roots(); void entry_weak_roots();
void entry_class_unloading(); void entry_class_unloading();
void entry_strong_roots(); void entry_strong_roots();
@@ -415,7 +414,7 @@ private:
void op_reset(); void op_reset();
void op_mark(); void op_mark();
void op_preclean(); void op_weak_refs();
void op_weak_roots(); void op_weak_roots();
void op_class_unloading(); void op_class_unloading();
void op_strong_roots(); void op_strong_roots();
@@ -494,20 +493,10 @@ public:
// ---------- Reference processing // ---------- Reference processing
// //
private: private:
AlwaysTrueClosure _subject_to_discovery; ShenandoahReferenceProcessor* const _ref_processor;
ReferenceProcessor* _ref_processor;
ShenandoahSharedFlag _process_references;
bool _ref_proc_mt_discovery;
bool _ref_proc_mt_processing;
void ref_processing_init();
public: public:
ReferenceProcessor* ref_processor() { return _ref_processor; } ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
bool ref_processor_mt_discovery() { return _ref_proc_mt_discovery; }
bool ref_processor_mt_processing() { return _ref_proc_mt_processing; }
void set_process_references(bool pr);
bool process_references() const;
// ---------- Class Unloading // ---------- Class Unloading
// //


@@ -401,7 +401,6 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region,
ShenandoahMarkingContext* const ctx = complete_marking_context(); ShenandoahMarkingContext* const ctx = complete_marking_context();
assert(ctx->is_complete(), "sanity"); assert(ctx->is_complete(), "sanity");
MarkBitMap* mark_bit_map = ctx->mark_bit_map();
HeapWord* tams = ctx->top_at_mark_start(region); HeapWord* tams = ctx->top_at_mark_start(region);
size_t skip_bitmap_delta = 1; size_t skip_bitmap_delta = 1;
@@ -413,7 +412,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region,
// Try to scan the initial candidate. If the candidate is above the TAMS, it would // Try to scan the initial candidate. If the candidate is above the TAMS, it would
// fail the subsequent "< limit_bitmap" checks, and fall through to Step 2. // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end); HeapWord* cb = ctx->get_next_marked_addr(start, end);
intx dist = ShenandoahMarkScanPrefetch; intx dist = ShenandoahMarkScanPrefetch;
if (dist > 0) { if (dist > 0) {
@@ -440,7 +439,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region,
slots[avail++] = cb; slots[avail++] = cb;
cb += skip_bitmap_delta; cb += skip_bitmap_delta;
if (cb < limit_bitmap) { if (cb < limit_bitmap) {
cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap); cb = ctx->get_next_marked_addr(cb, limit_bitmap);
} }
} }
@@ -463,7 +462,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region,
cl->do_object(obj); cl->do_object(obj);
cb += skip_bitmap_delta; cb += skip_bitmap_delta;
if (cb < limit_bitmap) { if (cb < limit_bitmap) {
cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap); cb = ctx->get_next_marked_addr(cb, limit_bitmap);
} }
} }
} }


@@ -57,10 +57,6 @@ void ShenandoahInitLogger::print_heap() {
log_info(gc, init)("Humongous Object Threshold: " SIZE_FORMAT "%s", log_info(gc, init)("Humongous Object Threshold: " SIZE_FORMAT "%s",
byte_size_in_exact_unit(ShenandoahHeapRegion::humongous_threshold_bytes()), byte_size_in_exact_unit(ShenandoahHeapRegion::humongous_threshold_bytes()),
exact_unit_for_byte_size(ShenandoahHeapRegion::humongous_threshold_bytes())); exact_unit_for_byte_size(ShenandoahHeapRegion::humongous_threshold_bytes()));
log_info(gc, init)("Reference Processing: %s discovery, %s processing",
heap->ref_processor_mt_discovery() ? "Parallel" : "Serial",
heap->ref_processor_mt_processing() ? "Parallel" : "Serial");
} }
void ShenandoahInitLogger::print() { void ShenandoahInitLogger::print() {


@@ -0,0 +1,146 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahMarkBitMap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
ShenandoahMarkBitMap::ShenandoahMarkBitMap(MemRegion heap, MemRegion storage) :
_shift(LogMinObjAlignment),
_covered(heap),
_map((BitMap::bm_word_t*) storage.start()),
_size((heap.word_size() * 2) >> _shift) {
}
size_t ShenandoahMarkBitMap::compute_size(size_t heap_size) {
return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}
size_t ShenandoahMarkBitMap::mark_distance() {
return MinObjAlignmentInBytes * BitsPerByte / 2;
}
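Compared to the generic MarkBitMap, this map spends two bits per possible object start (note the "* 2" in the constructor): one for the strong and one for the weak mark. A standalone sanity check of the sizing arithmetic, assuming the usual 64-bit default of MinObjAlignmentInBytes == 8:

// Standalone check of the sizing math above; compiles with any C++ compiler.
#include <cassert>
#include <cstddef>

int main() {
  const size_t min_obj_alignment_bytes = 8;  // assumed 64-bit default
  const size_t bits_per_byte = 8;
  // One (strong, weak) bit pair per possible object start:
  const size_t mark_distance = min_obj_alignment_bytes * bits_per_byte / 2;  // = 32
  // A 32 GB heap therefore needs a 1 GB marking bitmap:
  const size_t heap_bytes = size_t(32) * 1024 * 1024 * 1024;
  assert(heap_bytes / mark_distance == size_t(1) * 1024 * 1024 * 1024);
  return 0;
}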
HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const {
assert(limit != NULL, "limit must not be NULL");
// Round addr up to a possible object boundary to be safe.
size_t const addr_offset = address_to_index(align_up(addr, HeapWordSize << LogMinObjAlignment));
size_t const limit_offset = address_to_index(limit);
size_t const nextOffset = get_next_one_offset(addr_offset, limit_offset);
return index_to_address(nextOffset);
}
void ShenandoahMarkBitMap::clear_range_within_word(idx_t beg, idx_t end) {
// With a valid range (beg <= end), this test ensures that end != 0, as
// required by inverted_bit_mask_for_range. Also avoids an unnecessary write.
if (beg != end) {
bm_word_t mask = inverted_bit_mask_for_range(beg, end);
*word_addr(beg) &= mask;
}
}
void ShenandoahMarkBitMap::clear_range(idx_t beg, idx_t end) {
verify_range(beg, end);
idx_t beg_full_word = to_words_align_up(beg);
idx_t end_full_word = to_words_align_down(end);
if (beg_full_word < end_full_word) {
// The range includes at least one full word.
clear_range_within_word(beg, bit_index(beg_full_word));
clear_range_of_words(beg_full_word, end_full_word);
clear_range_within_word(bit_index(end_full_word), end);
} else {
// The range spans at most 2 partial words.
idx_t boundary = MIN2(bit_index(beg_full_word), end);
clear_range_within_word(beg, boundary);
clear_range_within_word(boundary, end);
}
}
bool ShenandoahMarkBitMap::is_small_range_of_words(idx_t beg_full_word, idx_t end_full_word) {
// There is little point to call large version on small ranges.
// Need to check carefully, keeping potential idx_t over/underflow in mind,
// because beg_full_word > end_full_word can occur when beg and end are in
// the same word.
// The threshold should be at least one word.
STATIC_ASSERT(small_range_words >= 1);
return beg_full_word + small_range_words >= end_full_word;
}
void ShenandoahMarkBitMap::clear_large_range(idx_t beg, idx_t end) {
verify_range(beg, end);
idx_t beg_full_word = to_words_align_up(beg);
idx_t end_full_word = to_words_align_down(end);
if (is_small_range_of_words(beg_full_word, end_full_word)) {
clear_range(beg, end);
return;
}
// The range includes at least one full word.
clear_range_within_word(beg, bit_index(beg_full_word));
clear_large_range_of_words(beg_full_word, end_full_word);
clear_range_within_word(bit_index(end_full_word), end);
}
void ShenandoahMarkBitMap::clear_range_large(MemRegion mr) {
MemRegion intersection = mr.intersection(_covered);
assert(!intersection.is_empty(),
"Given range from " PTR_FORMAT " to " PTR_FORMAT " is completely outside the heap",
p2i(mr.start()), p2i(mr.end()));
// convert address range into offset range
size_t beg = address_to_index(intersection.start());
size_t end = address_to_index(intersection.end());
clear_large_range(beg, end);
}
#ifdef ASSERT
void ShenandoahMarkBitMap::check_mark(HeapWord* addr) const {
assert(ShenandoahHeap::heap()->is_in(addr),
"Trying to access bitmap " PTR_FORMAT " for address " PTR_FORMAT " not in the heap.",
p2i(this), p2i(addr));
}
void ShenandoahMarkBitMap::verify_index(idx_t bit) const {
assert(bit < _size,
"BitMap index out of bounds: " SIZE_FORMAT " >= " SIZE_FORMAT,
bit, _size);
}
void ShenandoahMarkBitMap::verify_limit(idx_t bit) const {
assert(bit <= _size,
"BitMap limit out of bounds: " SIZE_FORMAT " > " SIZE_FORMAT,
bit, _size);
}
void ShenandoahMarkBitMap::verify_range(idx_t beg, idx_t end) const {
assert(beg <= end,
"BitMap range error: " SIZE_FORMAT " > " SIZE_FORMAT, beg, end);
verify_limit(end);
}
#endif


@@ -0,0 +1,180 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_HPP
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
class ShenandoahMarkBitMap {
public:
typedef size_t idx_t; // Type used for bit and word indices.
typedef uintptr_t bm_word_t; // Element type of array that represents the
// bitmap, with BitsPerWord bits per element.
private:
// Values for get_next_bit_impl flip parameter.
static const bm_word_t find_ones_flip = 0;
static const bm_word_t find_zeros_flip = ~(bm_word_t)0;
int const _shift;
MemRegion _covered;
bm_word_t* _map; // First word in bitmap
idx_t _size; // Size of bitmap (in bits)
// Threshold for performing small range operation, even when large range
// operation was requested. Measured in words.
static const size_t small_range_words = 32;
static bool is_small_range_of_words(idx_t beg_full_word, idx_t end_full_word);
inline size_t address_to_index(const HeapWord* addr) const;
inline HeapWord* index_to_address(size_t offset) const;
void check_mark(HeapWord* addr) const NOT_DEBUG_RETURN;
// Return a mask that will select the specified bit, when applied to the word
// containing the bit.
static bm_word_t bit_mask(idx_t bit) { return (bm_word_t)1 << bit_in_word(bit); }
// Return the bit number of the first bit in the specified word.
static idx_t bit_index(idx_t word) { return word << LogBitsPerWord; }
// Return the position of bit within the word that contains it (e.g., if
// bitmap words are 32 bits, return a number 0 <= n <= 31).
static idx_t bit_in_word(idx_t bit) { return bit & (BitsPerWord - 1); }
bm_word_t* map() { return _map; }
const bm_word_t* map() const { return _map; }
bm_word_t map(idx_t word) const { return _map[word]; }
// Return a pointer to the word containing the specified bit.
bm_word_t* word_addr(idx_t bit) {
return map() + to_words_align_down(bit);
}
const bm_word_t* word_addr(idx_t bit) const {
return map() + to_words_align_down(bit);
}
static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
bool at(idx_t index) const {
verify_index(index);
return (*word_addr(index) & bit_mask(index)) != 0;
}
// Assumes relevant validity checking for bit has already been done.
static idx_t raw_to_words_align_up(idx_t bit) {
return raw_to_words_align_down(bit + (BitsPerWord - 1));
}
// Assumes relevant validity checking for bit has already been done.
static idx_t raw_to_words_align_down(idx_t bit) {
return bit >> LogBitsPerWord;
}
// Word-aligns bit and converts it to a word offset.
// precondition: bit <= size()
idx_t to_words_align_up(idx_t bit) const {
verify_limit(bit);
return raw_to_words_align_up(bit);
}
// Word-aligns bit and converts it to a word offset.
// precondition: bit <= size()
inline idx_t to_words_align_down(idx_t bit) const {
verify_limit(bit);
return raw_to_words_align_down(bit);
}
// Helper for get_next_{zero,one}_bit variants.
// - flip designates whether searching for 1s or 0s. Must be one of
// find_{zeros,ones}_flip.
// - aligned_right is true if r_index is a priori on a bm_word_t boundary.
template<bm_word_t flip, bool aligned_right>
inline idx_t get_next_bit_impl(idx_t l_index, idx_t r_index) const;
inline idx_t get_next_one_offset (idx_t l_index, idx_t r_index) const;
void clear_large_range (idx_t beg, idx_t end);
// Verify bit is less than size().
void verify_index(idx_t bit) const NOT_DEBUG_RETURN;
// Verify bit is not greater than size().
void verify_limit(idx_t bit) const NOT_DEBUG_RETURN;
// Verify [beg,end) is a valid range, e.g. beg <= end <= size().
void verify_range(idx_t beg, idx_t end) const NOT_DEBUG_RETURN;
public:
static size_t compute_size(size_t heap_size);
// Returns the number of bytes on the heap between two marks in the bitmap.
static size_t mark_distance();
// Returns how many bytes (or bits) of the heap a single byte (or bit) of the
// mark bitmap corresponds to. This is the same as the mark distance above.
static size_t heap_map_factor() {
return mark_distance();
}
ShenandoahMarkBitMap(MemRegion heap, MemRegion storage);
// Mark word as 'strong' if it hasn't been marked strong yet.
// Return true if this call marked the word strong, false if it was already
// marked strong or another thread beat us to it.
// A word previously marked weak (finalizable), whether earlier or by a
// concurrent thread, is upgraded to strong; in that case this method also
// returns true and reports the upgrade through was_upgraded.
inline bool mark_strong(HeapWord* w, bool& was_upgraded);
// Mark word as 'weak' if it hasn't been marked weak or strong yet.
// Return true if this call marked the word weak, false if it was already
// marked strong or weak, or another thread beat us to it.
inline bool mark_weak(HeapWord* heap_addr);
inline bool is_marked(HeapWord* addr) const;
inline bool is_marked_strong(HeapWord* w) const;
inline bool is_marked_weak(HeapWord* addr) const;
// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
HeapWord* get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const;
bm_word_t inverted_bit_mask_for_range(idx_t beg, idx_t end) const;
void clear_range_within_word (idx_t beg, idx_t end);
void clear_range (idx_t beg, idx_t end);
void clear_range_large(MemRegion mr);
void clear_range_of_words(idx_t beg, idx_t end);
void clear_large_range_of_words(idx_t beg, idx_t end);
static void clear_range_of_words(bm_word_t* map, idx_t beg, idx_t end);
};
#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_HPP
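The strong/weak bit pair this header describes can be exercised outside HotSpot. Below is a minimal, self-contained sketch (an assumption-laden model, not the JDK code): bit 0 of a slot is the strong bit, bit 1 the weak bit, one 64-bit word covers 32 slots, and std::atomic CAS stands in for Atomic::cmpxchg, including the weak-to-strong upgrade that mark_strong() reports.

// Model of the two-bit strong/weak marking scheme (sketch, not JDK code).
#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint64_t> word{0};

// Returns true if this call set the strong bit; reports a weak->strong upgrade.
bool mark_strong(unsigned slot, bool& was_upgraded) {
  const uint64_t strong = uint64_t(1) << (2 * slot);
  const uint64_t weak   = uint64_t(1) << (2 * slot + 1);
  uint64_t old_val = word.load();
  while (true) {
    uint64_t new_val = old_val | strong;
    if (new_val == old_val) return false;              // already strong
    if (word.compare_exchange_weak(old_val, new_val)) {
      was_upgraded = (old_val & weak) != 0;            // was weak before
      return true;
    }                                                  // lost the race: retry with refreshed old_val
  }
}

// Returns true if this call set the weak bit; strong always wins over weak.
bool mark_weak(unsigned slot) {
  const uint64_t strong = uint64_t(1) << (2 * slot);
  const uint64_t weak   = uint64_t(1) << (2 * slot + 1);
  uint64_t old_val = word.load();
  while (true) {
    if (old_val & strong) return false;                // already strong
    uint64_t new_val = old_val | weak;
    if (new_val == old_val) return false;              // already weak
    if (word.compare_exchange_weak(old_val, new_val)) return true;
  }
}

int main() {
  bool upgraded = false;
  mark_weak(3);                        // finalizable marking reaches object 3
  mark_strong(3, upgraded);            // strong wavefront reaches it later
  printf("upgraded: %d\n", upgraded);  // prints 1
}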

View file

@@ -0,0 +1,218 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_INLINE_HPP
#include "gc/shenandoah/shenandoahMarkBitMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/count_trailing_zeros.hpp"
inline size_t ShenandoahMarkBitMap::address_to_index(const HeapWord* addr) const {
return (pointer_delta(addr, _covered.start()) << 1) >> _shift;
}
inline HeapWord* ShenandoahMarkBitMap::index_to_address(size_t offset) const {
return _covered.start() + ((offset >> 1) << _shift);
}
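// Worked example (hedged, assuming _shift == LogMinObjAlignment == 0, i.e. the
// default 8-byte object alignment on a 64-bit heap): an object 5 HeapWords past
// _covered.start() maps to index (5 << 1) >> 0 == 10, so bit 10 is its strong
// bit and bit 11 its weak bit; index_to_address() inverts this: (10 >> 1) << 0
// == 5 words past the heap base.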
inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upgraded) {
check_mark(heap_addr);
idx_t bit = address_to_index(heap_addr);
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask = bit_mask(bit);
const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1);
bm_word_t old_val = load_word_ordered(addr, memory_order_conservative);
do {
const bm_word_t new_val = old_val | mask;
if (new_val == old_val) {
assert(!was_upgraded, "Should be false already");
return false; // Someone else beat us to it.
}
const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative);
if (cur_val == old_val) {
was_upgraded = (cur_val & mask_weak) != 0;
return true; // Success.
}
old_val = cur_val; // The value changed, try again.
} while (true);
}
inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) {
check_mark(heap_addr);
idx_t bit = address_to_index(heap_addr);
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1);
const bm_word_t mask_strong = (bm_word_t)1 << bit_in_word(bit);
bm_word_t old_val = load_word_ordered(addr, memory_order_conservative);
do {
if ((old_val & mask_strong) != 0) {
return false; // Already marked strong
}
const bm_word_t new_val = old_val | mask_weak;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative);
if (cur_val == old_val) {
return true; // Success.
}
old_val = cur_val; // The value changed, try again.
} while (true);
}
inline bool ShenandoahMarkBitMap::is_marked_strong(HeapWord* addr) const {
check_mark(addr);
return at(address_to_index(addr));
}
inline bool ShenandoahMarkBitMap::is_marked_weak(HeapWord* addr) const {
check_mark(addr);
return at(address_to_index(addr) + 1);
}
inline bool ShenandoahMarkBitMap::is_marked(HeapWord* addr) const {
check_mark(addr);
idx_t index = address_to_index(addr);
verify_index(index);
bm_word_t mask = (bm_word_t)3 << bit_in_word(index);
return (*word_addr(index) & mask) != 0;
}
inline const ShenandoahMarkBitMap::bm_word_t ShenandoahMarkBitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
return Atomic::load(addr);
} else {
assert(memory_order == memory_order_acq_rel ||
memory_order == memory_order_acquire ||
memory_order == memory_order_conservative,
"unexpected memory ordering");
return Atomic::load_acquire(addr);
}
}
template<ShenandoahMarkBitMap::bm_word_t flip, bool aligned_right>
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_bit_impl(idx_t l_index, idx_t r_index) const {
STATIC_ASSERT(flip == find_ones_flip || flip == find_zeros_flip);
verify_range(l_index, r_index);
assert(!aligned_right || is_aligned(r_index, BitsPerWord), "r_index not aligned");
// The first word often contains an interesting bit, either due to
// density or because of features of the calling algorithm. So it's
// important to examine that first word with a minimum of fuss,
// minimizing setup time for later words that will be wasted if the
// first word is indeed interesting.
// The benefit from aligned_right being true is relatively small.
// It saves an operation in the setup for the word search loop.
// It also eliminates the range check on the final result.
// However, callers often have a comparison with r_index, and
// inlining often allows the two comparisons to be combined; it is
// important when !aligned_right that return paths either return
// r_index or a value dominated by a comparison with r_index.
// aligned_right is still helpful when the caller doesn't have a
// range check because features of the calling algorithm guarantee
// an interesting bit will be present.
if (l_index < r_index) {
// Get the word containing l_index, and shift out low bits.
idx_t index = to_words_align_down(l_index);
bm_word_t cword = (map(index) ^ flip) >> bit_in_word(l_index);
if ((cword & 1) != 0) {
// The first bit is similarly often interesting. When it matters
// (density or features of the calling algorithm make it likely
// the first bit is set), going straight to the next clause compares
// poorly with doing this check first; count_trailing_zeros can be
// relatively expensive, plus there is the additional range check.
// But when the first bit isn't set, the cost of having tested for
// it is relatively small compared to the rest of the search.
return l_index;
} else if (cword != 0) {
// Flipped and shifted first word is non-zero.
idx_t result = l_index + count_trailing_zeros(cword);
if (aligned_right || (result < r_index)) return result;
// Result is beyond range bound; return r_index.
} else {
// Flipped and shifted first word is zero. Word search through
// aligned up r_index for a non-zero flipped word.
idx_t limit = aligned_right
? to_words_align_down(r_index) // Minuscule savings when aligned.
: to_words_align_up(r_index);
while (++index < limit) {
cword = map(index) ^ flip;
if (cword != 0) {
idx_t result = bit_index(index) + count_trailing_zeros(cword);
if (aligned_right || (result < r_index)) return result;
// Result is beyond range bound; return r_index.
assert((index + 1) == limit, "invariant");
break;
}
}
// No bits in range; return r_index.
}
}
return r_index;
}
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_one_offset(idx_t l_offset, idx_t r_offset) const {
return get_next_bit_impl<find_ones_flip, false>(l_offset, r_offset);
}
// Returns a bit mask for a range of bits [beg, end) within a single word. Each
// bit in the mask is 0 if the bit is in the range, 1 if not in the range. The
// returned mask can be used directly to clear the range, or inverted to set the
// range. Note: end must not be 0.
inline ShenandoahMarkBitMap::bm_word_t
ShenandoahMarkBitMap::inverted_bit_mask_for_range(idx_t beg, idx_t end) const {
assert(end != 0, "does not work when end == 0");
assert(beg == end || to_words_align_down(beg) == to_words_align_down(end - 1),
"must be a single-word range");
bm_word_t mask = bit_mask(beg) - 1; // low (right) bits
if (bit_in_word(end) != 0) {
mask |= ~(bit_mask(end) - 1); // high (left) bits
}
return mask;
}
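// Worked example (hedged): for beg == 3 and end == 6 within one 64-bit word,
// bit_mask(3) - 1 contributes bits 0..2 and ~(bit_mask(6) - 1) contributes
// bits 6..63, so the returned mask has exactly bits 3..5 clear; ANDing it into
// the word clears the range [3, 6), ORing its inverse sets it.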
inline void ShenandoahMarkBitMap::clear_range_of_words(bm_word_t* map, idx_t beg, idx_t end) {
for (idx_t i = beg; i < end; ++i) map[i] = 0;
}
inline void ShenandoahMarkBitMap::clear_large_range_of_words(idx_t beg, idx_t end) {
assert(beg <= end, "underflow");
memset(_map + beg, 0, (end - beg) * sizeof(bm_word_t));
}
inline void ShenandoahMarkBitMap::clear_range_of_words(idx_t beg, idx_t end) {
clear_range_of_words(_map, beg, end);
}
#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_INLINE_HPP
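The word-at-a-time scan in get_next_bit_impl() can be modeled in a few lines. A standalone sketch (assumptions: 64-bit words, GCC/Clang __builtin_ctzll standing in for count_trailing_zeros; not the JDK code):

#include <cstdint>

// Find the first set bit in [l, r) over a little bitmap, mirroring the
// first-word fast path and the word loop above.
unsigned find_first_set(const uint64_t* map, unsigned l, unsigned r) {
  if (l >= r) return r;
  unsigned word = l / 64;
  uint64_t cword = map[word] >> (l % 64);        // shift out bits below l
  if (cword & 1) return l;                       // cheap first-bit check
  if (cword != 0) {
    unsigned result = l + __builtin_ctzll(cword);
    return result < r ? result : r;
  }
  unsigned limit = (r + 63) / 64;                // like to_words_align_up(r)
  while (++word < limit) {
    cword = map[word];
    if (cword != 0) {
      unsigned result = word * 64 + __builtin_ctzll(cword);
      return result < r ? result : r;
    }
  }
  return r;                                      // no set bit in [l, r)
}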

View file

@@ -38,6 +38,7 @@
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
@@ -129,10 +130,8 @@ void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
   assert(!heap->marking_context()->is_complete(), "sanity");
   // e. Abandon reference discovery and clear all discovered references.
-  ReferenceProcessor* rp = heap->ref_processor();
-  rp->disable_discovery();
+  ShenandoahReferenceProcessor* rp = heap->ref_processor();
   rp->abandon_partial_discovery();
-  rp->verify_no_references_recorded();
   // f. Set back forwarded objects bit back, in case some steps above dropped it.
   heap->set_has_forwarded_objects(has_forwarded_objects);
@@ -241,18 +240,16 @@ void ShenandoahMarkCompact::phase1_mark_heap() {
   ShenandoahConcurrentMark* cm = heap->concurrent_mark();
-  heap->set_process_references(heap->heuristics()->can_process_references());
   heap->set_unload_classes(heap->heuristics()->can_unload_classes());
-  ReferenceProcessor* rp = heap->ref_processor();
+  ShenandoahReferenceProcessor* rp = heap->ref_processor();
   // enable ("weak") refs discovery
-  rp->enable_discovery(true /*verify_no_refs*/);
-  rp->setup_policy(true); // forcefully purge all soft references
-  rp->set_active_mt_degree(heap->workers()->active_workers());
+  rp->set_soft_reference_policy(true); // forcefully purge all soft references
   cm->mark_roots(ShenandoahPhaseTimings::full_gc_scan_roots);
   cm->finish_mark_from_roots(/* full_gc = */ true);
   heap->mark_complete_marking_context();
+  rp->process_references(heap->workers(), false /* concurrent */);
   heap->parallel_cleaning(true /* full_gc */);
 }

View file

@@ -29,11 +29,11 @@
 #include "gc/shenandoah/shenandoahMarkingContext.hpp"
 ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) :
+  _mark_bit_map(heap_region, bitmap_region),
   _top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
   _top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
   _top_at_mark_starts(_top_at_mark_starts_base -
                       ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) {
-  _mark_bit_map.initialize(heap_region, bitmap_region);
 }
 bool ShenandoahMarkingContext::is_bitmap_clear() const {
View file

@@ -25,7 +25,8 @@
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
-#include "gc/shared/markBitMap.hpp"
+#include "gc/shenandoah/shenandoahMarkBitMap.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oopsHierarchy.hpp"
@@ -35,7 +36,7 @@
  */
 class ShenandoahMarkingContext : public CHeapObj<mtGC> {
 private:
-  MarkBitMap _mark_bit_map;
+  ShenandoahMarkBitMap _mark_bit_map;
   HeapWord** const _top_bitmaps;
   HeapWord** const _top_at_mark_starts_base;
@@ -51,15 +52,19 @@ public:
  * been marked by this thread. Returns false if the object has already been marked,
  * or if a competing thread succeeded in marking this object.
  */
-  inline bool mark(oop obj);
-  inline bool is_marked(oop obj) const;
+  inline bool mark_strong(oop obj, bool& was_upgraded);
+  inline bool mark_weak(oop obj);
+  // Simple versions of marking accessors, to be used outside of marking (e.g. no possible concurrent updates)
+  inline bool is_marked(oop) const;
+  inline bool is_marked_strong(oop obj) const;
+  inline bool is_marked_weak(oop obj) const;
+  inline HeapWord* get_next_marked_addr(HeapWord* addr, HeapWord* limit) const;
   inline bool allocated_after_mark_start(oop obj) const;
   inline bool allocated_after_mark_start(HeapWord* addr) const;
-  inline MarkBitMap* mark_bit_map();
   inline HeapWord* top_at_mark_start(ShenandoahHeapRegion* r) const;
   inline void capture_top_at_mark_start(ShenandoahHeapRegion* r);
   inline void reset_top_at_mark_start(ShenandoahHeapRegion* r);

View file

@@ -25,19 +25,33 @@
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP
+#include "gc/shenandoah/shenandoahMarkBitMap.inline.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.hpp"
-inline MarkBitMap* ShenandoahMarkingContext::mark_bit_map() {
-  return &_mark_bit_map;
-}
-inline bool ShenandoahMarkingContext::mark(oop obj) {
+inline bool ShenandoahMarkingContext::mark_strong(oop obj, bool& was_upgraded) {
+  shenandoah_assert_not_forwarded(NULL, obj);
+  return (! allocated_after_mark_start(obj)) && _mark_bit_map.mark_strong(cast_from_oop<HeapWord*>(obj), was_upgraded);
+}
+inline bool ShenandoahMarkingContext::mark_weak(oop obj) {
   shenandoah_assert_not_forwarded(NULL, obj);
-  return (! allocated_after_mark_start(obj)) && _mark_bit_map.par_mark(obj);
+  return (! allocated_after_mark_start(obj)) && _mark_bit_map.mark_weak(cast_from_oop<HeapWord *>(obj));
 }
 inline bool ShenandoahMarkingContext::is_marked(oop obj) const {
-  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(obj);
+  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(cast_from_oop<HeapWord *>(obj));
+}
+inline bool ShenandoahMarkingContext::is_marked_strong(oop obj) const {
+  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked_strong(cast_from_oop<HeapWord*>(obj));
+}
+inline bool ShenandoahMarkingContext::is_marked_weak(oop obj) const {
+  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked_weak(cast_from_oop<HeapWord *>(obj));
+}
+inline HeapWord* ShenandoahMarkingContext::get_next_marked_addr(HeapWord* start, HeapWord* limit) const {
+  return _mark_bit_map.get_next_marked_addr(start, limit);
 }
 inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const {

View file

@@ -49,13 +49,22 @@ private:
   ShenandoahObjToScanQueue* _queue;
   ShenandoahHeap* _heap;
   ShenandoahMarkingContext* const _mark_context;
+  bool _weak;
 protected:
   template <class T, UpdateRefsMode UPDATE_MODE, StringDedupMode STRING_DEDUP>
   void work(T *p);
 public:
-  ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp);
+  ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp);
+  bool is_weak() const {
+    return _weak;
+  }
+  void set_weak(bool weak) {
+    _weak = weak;
+  }
 };
class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure { class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure {
@@ -64,7 +73,7 @@ private:
   inline void do_oop_work(T* p) { work<T, CONCURRENT, NO_DEDUP>(p); }
 public:
-  ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -78,7 +87,7 @@ private:
   inline void do_oop_work(T* p) { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
 public:
-  ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -92,7 +101,7 @@ private:
   inline void do_oop_work(T* p) { work<T, CONCURRENT, NO_DEDUP>(p); }
 public:
-  ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -106,7 +115,7 @@ private:
   inline void do_oop_work(T* p) { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
 public:
-  ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -120,7 +129,7 @@ private:
   inline void do_oop_work(T* p) { work<T, NONE, NO_DEDUP>(p); }
 public:
-  ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -134,7 +143,7 @@ private:
   inline void do_oop_work(T* p) { work<T, NONE, ENQUEUE_DEDUP>(p); }
 public:
-  ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -148,7 +157,7 @@ private:
   inline void do_oop_work(T* p) { work<T, RESOLVE, NO_DEDUP>(p); }
 public:
-  ShenandoahMarkResolveRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkResolveRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -162,7 +171,7 @@ private:
   inline void do_oop_work(T* p) { work<T, NONE, NO_DEDUP>(p); }
 public:
-  ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -176,7 +185,7 @@ private:
   inline void do_oop_work(T* p) { work<T, NONE, ENQUEUE_DEDUP>(p); }
 public:
-  ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
     ShenandoahMarkRefsSuperClosure(q, rp) {};
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }

View file

@@ -30,7 +30,7 @@
 template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
 inline void ShenandoahMarkRefsSuperClosure::work(T *p) {
-  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
+  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context, _weak);
 }
 template <class T>

View file

@@ -153,16 +153,6 @@ void ShenandoahPacer::setup_for_idle() {
  * the allocators unnecessarily, allow them to run unimpeded.
  */
-void ShenandoahPacer::setup_for_preclean() {
-  assert(ShenandoahPacing, "Only be here when pacing is enabled");
-  size_t initial = _heap->max_capacity();
-  restart_with(initial, 1.0);
-  log_info(gc, ergo)("Pacer for Precleaning. Non-Taxable: " SIZE_FORMAT "%s",
-                     byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
-}
 void ShenandoahPacer::setup_for_reset() {
   assert(ShenandoahPacing, "Only be here when pacing is enabled");
View file

@@ -79,7 +79,6 @@ public:
   void setup_for_updaterefs();
   void setup_for_reset();
-  void setup_for_preclean();
   inline void report_mark(size_t words);
   inline void report_evac(size_t words);

View file

@@ -113,6 +113,7 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
     case heap_iteration_roots:
     case conc_mark_roots:
     case conc_weak_roots_work:
+    case conc_weak_refs_work:
    case conc_strong_roots:
      return true;
    default:

View file

@@ -60,8 +60,6 @@ class outputStream;
   f(conc_mark_roots, " Roots ") \
   SHENANDOAH_PAR_PHASE_DO(conc_mark_roots, " CM: ", f) \
   \
-  f(conc_preclean, "Concurrent Precleaning") \
-  \
   f(final_mark_gross, "Pause Final Mark (G)") \
   f(final_mark, "Pause Final Mark (N)") \
   f(update_roots, " Update Roots") \
@@ -82,6 +80,9 @@ class outputStream;
   f(init_evac, " Initial Evacuation") \
   SHENANDOAH_PAR_PHASE_DO(evac_, " E: ", f) \
   \
+  f(conc_weak_refs, "Concurrent Weak References") \
+  f(conc_weak_refs_work, " Process") \
+  SHENANDOAH_PAR_PHASE_DO(conc_weak_refs_work_, " CWRF: ", f) \
   f(conc_weak_roots, "Concurrent Weak Roots") \
   f(conc_weak_roots_work, " Roots") \
   SHENANDOAH_PAR_PHASE_DO(conc_weak_roots_work_, " CWR: ", f) \

View file

@@ -0,0 +1,592 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "logging/log.hpp"
static ReferenceType reference_type(oop reference) {
return InstanceKlass::cast(reference->klass())->reference_type();
}
static const char* reference_type_name(ReferenceType type) {
switch (type) {
case REF_SOFT:
return "Soft";
case REF_WEAK:
return "Weak";
case REF_FINAL:
return "Final";
case REF_PHANTOM:
return "Phantom";
default:
ShouldNotReachHere();
return NULL;
}
}
template <typename T>
static void set_oop_field(T* field, oop value);
template <>
void set_oop_field<oop>(oop* field, oop value) {
*field = value;
}
template <>
void set_oop_field<narrowOop>(narrowOop* field, oop value) {
*field = CompressedOops::encode(value);
}
static oop lrb(oop obj) {
if (obj != NULL && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
} else {
return obj;
}
}
template <typename T>
static volatile T* reference_referent_addr(oop reference) {
return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference);
}
template <typename T>
static oop reference_referent(oop reference) {
T heap_oop = Atomic::load(reference_referent_addr<T>(reference));
return CompressedOops::decode(heap_oop);
}
static void reference_set_referent(oop reference, oop referent) {
java_lang_ref_Reference::set_referent_raw(reference, referent);
}
template <typename T>
static T* reference_discovered_addr(oop reference) {
return reinterpret_cast<T*>(java_lang_ref_Reference::discovered_addr_raw(reference));
}
template <typename T>
static oop reference_discovered(oop reference) {
T heap_oop = *reference_discovered_addr<T>(reference);
return lrb(CompressedOops::decode(heap_oop));
}
template <typename T>
static void reference_set_discovered(oop reference, oop discovered);
template <>
void reference_set_discovered<oop>(oop reference, oop discovered) {
*reference_discovered_addr<oop>(reference) = discovered;
}
template <>
void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
*reference_discovered_addr<narrowOop>(reference) = CompressedOops::encode(discovered);
}
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered);
template<>
bool reference_cas_discovered<narrowOop>(oop reference, oop discovered) {
volatile narrowOop* addr = reinterpret_cast<volatile narrowOop*>(java_lang_ref_Reference::discovered_addr_raw(reference));
narrowOop compare = CompressedOops::encode(NULL);
narrowOop exchange = CompressedOops::encode(discovered);
return Atomic::cmpxchg(addr, compare, exchange) == compare;
}
template<>
bool reference_cas_discovered<oop>(oop reference, oop discovered) {
volatile oop* addr = reinterpret_cast<volatile oop*>(java_lang_ref_Reference::discovered_addr_raw(reference));
return Atomic::cmpxchg(addr, oop(NULL), discovered) == NULL;
}
template <typename T>
static T* reference_next_addr(oop reference) {
return reinterpret_cast<T*>(java_lang_ref_Reference::next_addr_raw(reference));
}
template <typename T>
static oop reference_next(oop reference) {
T heap_oop = RawAccess<>::oop_load(reference_next_addr<T>(reference));
return lrb(CompressedOops::decode(heap_oop));
}
static void reference_set_next(oop reference, oop next) {
java_lang_ref_Reference::set_next_raw(reference, next);
}
static void soft_reference_update_clock() {
const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
java_lang_ref_SoftReference::set_clock(now);
}
ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
_discovered_list(NULL),
_encountered_count(),
_discovered_count(),
_enqueued_count() {
}
void ShenandoahRefProcThreadLocal::reset() {
_discovered_list = NULL;
_mark_closure = NULL;
for (uint i = 0; i < reference_type_count; i++) {
_encountered_count[i] = 0;
_discovered_count[i] = 0;
_enqueued_count[i] = 0;
}
}
template <typename T>
T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
return reinterpret_cast<T*>(&_discovered_list);
}
template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<oop>() const {
return *reinterpret_cast<const oop*>(&_discovered_list);
}
template <>
oop ShenandoahRefProcThreadLocal::discovered_list_head<narrowOop>() const {
return CompressedOops::decode(*reinterpret_cast<const narrowOop*>(&_discovered_list));
}
template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<narrowOop>(oop head) {
*discovered_list_addr<narrowOop>() = CompressedOops::encode(head);
}
template <>
void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
*discovered_list_addr<oop>() = head;
}
ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
_soft_reference_policy(NULL),
_ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
_pending_list(NULL),
_pending_list_tail(&_pending_list),
_iterate_discovered_list_id(0U) {
for (size_t i = 0; i < max_workers; i++) {
_ref_proc_thread_locals[i].reset();
}
}
void ShenandoahReferenceProcessor::reset_thread_locals() {
uint max_workers = ShenandoahHeap::heap()->max_workers();
for (uint i = 0; i < max_workers; i++) {
_ref_proc_thread_locals[i].reset();
}
}
void ShenandoahReferenceProcessor::set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure) {
_ref_proc_thread_locals[worker_id].set_mark_closure(mark_closure);
}
void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
static AlwaysClearPolicy always_clear_policy;
static LRUMaxHeapPolicy lru_max_heap_policy;
if (clear) {
log_info(gc, ref)("Clearing All SoftReferences");
_soft_reference_policy = &always_clear_policy;
} else {
_soft_reference_policy = &lru_max_heap_policy;
}
_soft_reference_policy->setup();
}
template <typename T>
bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
if (type == REF_FINAL) {
// A FinalReference is inactive if its next field is non-null. An application can't
// call enqueue() or clear() on a FinalReference.
return reference_next<T>(reference) != NULL;
} else {
// A non-FinalReference is inactive if the referent is null. The referent can only
// be null if the application called Reference.enqueue() or Reference.clear().
return referent == NULL;
}
}
bool ShenandoahReferenceProcessor::is_strongly_live(oop referent) const {
return ShenandoahHeap::heap()->marking_context()->is_marked_strong(referent);
}
bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
if (type != REF_SOFT) {
// Not a SoftReference
return false;
}
// Ask SoftReference policy
const jlong clock = java_lang_ref_SoftReference::clock();
assert(clock != 0, "Clock not initialized");
assert(_soft_reference_policy != NULL, "Policy not initialized");
return !_soft_reference_policy->should_clear_reference(reference, clock);
}
template <typename T>
bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
T heap_oop = RawAccess<>::oop_load(referent_addr);
oop referent = CompressedOops::decode(heap_oop);
if (is_inactive<T>(reference, referent, type)) {
log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
return false;
}
if (is_strongly_live(referent)) {
log_trace(gc,ref)("Reference strongly live: " PTR_FORMAT, p2i(reference));
return false;
}
if (is_softly_live(reference, type)) {
log_trace(gc,ref)("Reference softly live: " PTR_FORMAT, p2i(reference));
return false;
}
return true;
}
template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
const oop referent = reference_referent<T>(reference);
if (referent == NULL) {
// Reference has been cleared, by a call to Reference.enqueue()
// or Reference.clear() from the application, which means we
// should drop the reference.
return true;
}
// Check if the referent is still alive, in which case we should
// drop the reference.
if (type == REF_PHANTOM) {
return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent);
} else {
return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent);
}
}
template <typename T>
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
if (type == REF_FINAL) {
// Don't clear referent. It is needed by the Finalizer thread to make the call
// to finalize(). A FinalReference is instead made inactive by self-looping the
// next field. An application can't call FinalReference.enqueue(), so there is
// no race to worry about when setting the next field.
assert(reference_next<T>(reference) == NULL, "Already inactive");
assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)), "only make inactive final refs with alive referents");
reference_set_next(reference, reference);
} else {
// Clear referent
reference_set_referent(reference, NULL);
}
}
template <typename T>
bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, uint worker_id) {
if (!should_discover<T>(reference, type)) {
// Not discovered
return false;
}
if (reference_discovered<T>(reference) != NULL) {
// Already discovered. This can happen if the reference is marked finalizable first, and then strong,
// in which case it will be seen twice by marking.
log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
return true;
}
if (type == REF_FINAL) {
ShenandoahMarkRefsSuperClosure* cl = _ref_proc_thread_locals[worker_id].mark_closure();
bool weak = cl->is_weak();
cl->set_weak(true);
if (UseCompressedOops) {
cl->do_oop(reinterpret_cast<narrowOop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
} else {
cl->do_oop(reinterpret_cast<oop*>(java_lang_ref_Reference::referent_addr_raw(reference)));
}
cl->set_weak(weak);
}
// Add reference to discovered list
assert(worker_id != ShenandoahThreadLocalData::INVALID_WORKER_ID, "need valid worker ID");
ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
oop discovered_head = refproc_data.discovered_list_head<T>();
if (discovered_head == NULL) {
// Self-loop tail of list. We distinguish discovered from not-yet-discovered references by looking at
// their discovered field: if it is NULL, the reference has not been discovered; otherwise it has.
discovered_head = reference;
}
if (reference_cas_discovered<T>(reference, discovered_head)) {
refproc_data.set_discovered_list_head<T>(reference);
assert(refproc_data.discovered_list_head<T>() == reference, "reference must be new discovered head");
log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
_ref_proc_thread_locals[worker_id].inc_discovered(type);
}
return true;
}
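// Standalone sketch (not the JDK code) of the discovery scheme above: the
// otherwise-NULL discovered field doubles as the membership flag, the CAS on
// it arbitrates racing markers, and the tail self-loops so even the last
// element has a non-NULL discovered field. The per-worker list head needs no
// CAS because it is thread-local. Unlike the JDK method, this sketch reports
// only whether the calling thread claimed the reference.
//
//   struct Ref { std::atomic<Ref*> discovered{nullptr}; };
//
//   bool try_discover(Ref*& local_head, Ref* ref) {
//     Ref* next = (local_head == nullptr) ? ref : local_head; // self-loop tail
//     Ref* expected = nullptr;
//     if (ref->discovered.compare_exchange_strong(expected, next)) {
//       local_head = ref;   // we claimed this reference
//       return true;
//     }
//     return false;         // another thread already discovered it
//   }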
bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
if (!RegisterReferences) {
// Reference processing disabled
return false;
}
log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
uint worker_id = ShenandoahThreadLocalData::worker_id(Thread::current());
_ref_proc_thread_locals->inc_encountered(type);
if (UseCompressedOops) {
return discover<narrowOop>(reference, type, worker_id);
} else {
return discover<oop>(reference, type, worker_id);
}
}
template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
assert(reference_referent<T>(reference) == NULL ||
ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)), "only drop references with alive referents");
// Unlink and return next in list
oop next = reference_discovered<T>(reference);
reference_set_discovered<T>(reference, NULL);
return next;
}
template <typename T>
T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
// Update statistics
_ref_proc_thread_locals[worker_id].inc_enqueued(type);
// Make reference inactive
make_inactive<T>(reference, type);
// Return next in list
return reference_discovered_addr<T>(reference);
}
template <typename T>
void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head<T>()));
T* list = refproc_data.discovered_list_addr<T>();
// The list head is basically a GC root; we need to resolve and update it,
// otherwise we would later swap a from-space ref into Universe::pending_list().
if (!CompressedOops::is_null(*list)) {
oop first_resolved = lrb(CompressedOops::decode_not_null(*list));
set_oop_field(list, first_resolved);
}
T* p = list;
while (true) {
const oop reference = lrb(CompressedOops::decode(*p));
if (reference == NULL) {
break;
}
log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
const ReferenceType type = reference_type(reference);
if (should_drop<T>(reference, type)) {
set_oop_field(p, drop<T>(reference, type));
} else {
p = keep<T>(reference, type, worker_id);
}
const oop discovered = lrb(reference_discovered<T>(reference));
if (reference == discovered) {
// Reset terminating self-loop to NULL
reference_set_discovered<T>(reference, oop(NULL));
break;
}
}
// Prepend discovered references to internal pending list
if (!CompressedOops::is_null(*list)) {
oop head = lrb(CompressedOops::decode_not_null(*list));
shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
oop prev = Atomic::xchg(&_pending_list, head);
RawAccess<>::oop_store(p, prev);
if (prev == NULL) {
// First to prepend to list, record tail
_pending_list_tail = reinterpret_cast<void*>(p);
}
// Clear discovered list
set_oop_field(list, oop(NULL));
}
}
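// Example walk (hedged): given a worker list [A -> B -> C -> C(self-loop)]
// where only B's referent is still reachable, B is dropped (unlinked, its
// discovered field reset to NULL so a later cycle may rediscover it), while A
// and C are kept: made inactive, counted as enqueued, and the remaining chain
// [A -> C] is prepended onto _pending_list with a single Atomic::xchg.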
void ShenandoahReferenceProcessor::work() {
// Process discovered references
uint max_workers = ShenandoahHeap::heap()->max_workers();
uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U) - 1;
while (worker_id < max_workers) {
if (UseCompressedOops) {
process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
} else {
process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
}
worker_id = Atomic::add(&_iterate_discovered_list_id, 1U) - 1;
}
}
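// The claiming loop above is a plain atomic ticket counter; a standalone
// equivalent (sketch, not the JDK code; process_list is a hypothetical body):
//
//   std::atomic<unsigned> next_id{0};
//   void drain_all(unsigned max_workers) {
//     for (unsigned id = next_id.fetch_add(1); id < max_workers;
//          id = next_id.fetch_add(1)) {
//       process_list(id);
//     }
//   }
//
// Because ids are claimed dynamically, any gang thread can drain any
// discovered list, even lists built by threads outside the current gang.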
class ShenandoahReferenceProcessorTask : public AbstractGangTask {
private:
ShenandoahReferenceProcessor* const _reference_processor;
public:
ShenandoahReferenceProcessorTask(ShenandoahReferenceProcessor* reference_processor) :
AbstractGangTask("ShenandoahReferenceProcessorTask"),
_reference_processor(reference_processor) {
}
virtual void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
_reference_processor->work();
}
};
void ShenandoahReferenceProcessor::process_references(WorkGang* workers, bool concurrent) {
Atomic::release_store_fence(&_iterate_discovered_list_id, 0U);
// Process discovered lists
ShenandoahReferenceProcessorTask task(this);
workers->run_task(&task);
// Update SoftReference clock
soft_reference_update_clock();
// Collect, log and trace statistics
collect_statistics();
enqueue_references(concurrent);
}
void ShenandoahReferenceProcessor::enqueue_references_locked() {
// Prepend internal pending list to external pending list
shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
if (UseCompressedOops) {
*reinterpret_cast<narrowOop*>(_pending_list_tail) = CompressedOops::encode(Universe::swap_reference_pending_list(_pending_list));
} else {
*reinterpret_cast<oop*>(_pending_list_tail) = Universe::swap_reference_pending_list(_pending_list);
}
}
void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
if (_pending_list == NULL) {
// Nothing to enqueue
return;
}
if (!concurrent) {
// When called from mark-compact or degen-GC, the locking is already done by the VMOperation.
enqueue_references_locked();
} else {
// Heap_lock protects external pending list
MonitorLocker ml(Heap_lock, Mutex::_no_safepoint_check_flag);
enqueue_references_locked();
// Notify ReferenceHandler thread
ml.notify_all();
}
// Reset internal pending list
_pending_list = NULL;
_pending_list_tail = &_pending_list;
}
template<typename T>
void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
T discovered = *list;
while (!CompressedOops::is_null(discovered)) {
oop discovered_ref = CompressedOops::decode_not_null(discovered);
set_oop_field<T>(list, oop(NULL));
list = reference_discovered_addr<T>(discovered_ref);
discovered = *list;
}
}
void ShenandoahReferenceProcessor::abandon_partial_discovery() {
uint max_workers = ShenandoahHeap::heap()->max_workers();
for (uint index = 0; index < max_workers; index++) {
if (UseCompressedOops) {
clean_discovered_list<narrowOop>(_ref_proc_thread_locals[index].discovered_list_addr<narrowOop>());
} else {
clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
}
}
if (_pending_list != NULL) {
oop pending = _pending_list;
_pending_list = NULL;
if (UseCompressedOops) {
narrowOop* list = reference_discovered_addr<narrowOop>(pending);
clean_discovered_list<narrowOop>(list);
} else {
oop* list = reference_discovered_addr<oop>(pending);
clean_discovered_list<oop>(list);
}
}
_pending_list_tail = &_pending_list;
}
void ShenandoahReferenceProcessor::collect_statistics() {
Counters encountered = {};
Counters discovered = {};
Counters enqueued = {};
uint max_workers = ShenandoahHeap::heap()->max_workers();
for (uint i = 0; i < max_workers; i++) {
for (size_t type = 0; type < reference_type_count; type++) {
encountered[type] += _ref_proc_thread_locals[i].encountered((ReferenceType)type);
discovered[type] += _ref_proc_thread_locals[i].discovered((ReferenceType)type);
enqueued[type] += _ref_proc_thread_locals[i].enqueued((ReferenceType)type);
}
}
log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]);
log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]);
log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT,
enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]);
}

View file

@@ -0,0 +1,185 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHREFERENCEPROCESSOR_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHREFERENCEPROCESSOR_HPP
#include "gc/shared/referenceDiscoverer.hpp"
#include "memory/allocation.hpp"
class ShenandoahMarkRefsSuperClosure;
class WorkGang;
static const size_t reference_type_count = REF_PHANTOM + 1;
typedef size_t Counters[reference_type_count];
/*
* Shenandoah concurrent reference processing
*
* Concurrent reference processing is made up of two main phases:
* 1. Concurrent reference marking: Discover all j.l.r.Reference objects and determine reachability of all live objects.
* 2. Concurrent reference processing: For all discovered j.l.r.References, determine whether to keep them alive or clear
*    them. Also, clear and enqueue relevant references concurrently.
*
* Concurrent reference marking:
* The goal here is to establish the kind of reachability for all objects on the heap. We distinguish two kinds of
* reachability:
* - An object is 'strongly reachable' if it can be found by searching transitively from GC roots.
* - An object is 'finalizably reachable' if it is not strongly reachable, but can be found by searching
* from the referents of FinalReferences.
*
* These reachabilities are implemented in shenandoahMarkBitMap.*
* Conceptually, marking starts with a strong wavefront at the GC roots. Whenever a Reference object is encountered,
* it may be discovered by the ShenandoahReferenceProcessor. If it is discovered, it
* gets added to the discovered list, and that wavefront stops there, except when it's a FinalReference, in which
* case the wavefront switches to finalizable marking and marks through the referent. When a Reference is not
* discovered, e.g. if it's a SoftReference that is not eligible for discovery, then marking continues as if the
* Reference was a regular object. Whenever a strong wavefront encounters an object that is already marked
* finalizable, then the object's reachability is upgraded to strong.
*
* Concurrent reference processing:
* This happens after the concurrent marking phase and the final marking pause, when reachability for all objects
* has been established.
* The discovered list is scanned and for each reference is decided what to do:
* - If the referent is reachable (finalizably for PhantomReferences, strongly for all others), then the Reference
*   is dropped from the discovered list and otherwise ignored.
* - Otherwise, its referent is cleared and the Reference is added to the pending list, from which it will later
*   be processed (e.g. enqueued in its ReferenceQueue) by the Java ReferenceHandler thread.
*
* In order to prevent resurrection by Java threads calling Reference.get() concurrently while we are clearing
* referents, we employ a special barrier, the native LRB, which returns NULL when the referent is unreachable.
*/
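// Illustrative scenario (derived from the description above): let F be a
// FinalReference whose referent R is otherwise unreachable. Marking discovers
// F, switches to the finalizable wavefront and marks R (and everything R
// reaches) weak. Processing then finds R not strongly reachable, keeps F,
// leaves the referent in place for the Finalizer thread, makes F inactive by
// self-looping its next field, and enqueues F on the pending list. Meanwhile a
// Java thread calling get() on a WeakReference whose referent is no longer
// strongly reachable is intercepted by the native LRB and observes NULL, so
// the referent cannot be resurrected while referents are being cleared.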
class ShenandoahRefProcThreadLocal : public CHeapObj<mtGC> {
private:
void* _discovered_list;
ShenandoahMarkRefsSuperClosure* _mark_closure;
Counters _encountered_count;
Counters _discovered_count;
Counters _enqueued_count;
public:
ShenandoahRefProcThreadLocal();
ShenandoahRefProcThreadLocal(const ShenandoahRefProcThreadLocal&) = delete; // non construction-copyable
ShenandoahRefProcThreadLocal& operator=(const ShenandoahRefProcThreadLocal&) = delete; // non copyable
void reset();
ShenandoahMarkRefsSuperClosure* mark_closure() const {
return _mark_closure;
}
void set_mark_closure(ShenandoahMarkRefsSuperClosure* mark_closure) {
_mark_closure = mark_closure;
}
template<typename T>
T* discovered_list_addr();
template<typename T>
oop discovered_list_head() const;
template<typename T>
void set_discovered_list_head(oop head);
size_t encountered(ReferenceType type) const {
return _encountered_count[type];
}
size_t discovered(ReferenceType type) const {
return _discovered_count[type];
}
size_t enqueued(ReferenceType type) const {
return _enqueued_count[type];
}
void inc_encountered(ReferenceType type) {
_encountered_count[type]++;
}
void inc_discovered(ReferenceType type) {
_discovered_count[type]++;
}
void inc_enqueued(ReferenceType type) {
_enqueued_count[type]++;
}
};
class ShenandoahReferenceProcessor : public ReferenceDiscoverer {
private:
ReferencePolicy* _soft_reference_policy;
ShenandoahRefProcThreadLocal* _ref_proc_thread_locals;
oop _pending_list;
void* _pending_list_tail; // T*
volatile uint _iterate_discovered_list_id;
template <typename T>
bool is_inactive(oop reference, oop referent, ReferenceType type) const;
bool is_strongly_live(oop referent) const;
bool is_softly_live(oop reference, ReferenceType type) const;
template <typename T>
bool should_discover(oop reference, ReferenceType type) const;
template <typename T>
bool should_drop(oop reference, ReferenceType type) const;
template <typename T>
void make_inactive(oop reference, ReferenceType type) const;
template <typename T>
bool discover(oop reference, ReferenceType type, uint worker_id);
template <typename T>
oop drop(oop reference, ReferenceType type);
template <typename T>
T* keep(oop reference, ReferenceType type, uint worker_id);
template <typename T>
void process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id);
void enqueue_references_locked();
void enqueue_references(bool concurrent);
void collect_statistics();
template<typename T>
void clean_discovered_list(T* list);
public:
ShenandoahReferenceProcessor(uint max_workers);
void reset_thread_locals();
void set_mark_closure(uint worker_id, ShenandoahMarkRefsSuperClosure* mark_closure);
void set_soft_reference_policy(bool clear);
bool discover_reference(oop obj, ReferenceType type) override;
void process_references(WorkGang* workers, bool concurrent);
void work();
void abandon_partial_discovery();
};
#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHREFERENCEPROCESSOR_HPP

View file

@@ -69,3 +69,7 @@ JRT_END
 JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_weak(oopDesc * src, oop* load_addr))
   return (oopDesc*) ShenandoahBarrierSet::barrier_set()->load_reference_barrier<ON_UNKNOWN_OOP_REF, oop>(oop(src), load_addr);
 JRT_END
+JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_weak_narrow(oopDesc * src, narrowOop* load_addr))
+  return (oopDesc*) ShenandoahBarrierSet::barrier_set()->load_reference_barrier<ON_UNKNOWN_OOP_REF, narrowOop>(oop(src), load_addr);
+JRT_END

View file

@@ -42,6 +42,7 @@ public:
   static oopDesc* load_reference_barrier_narrow(oopDesc* src, narrowOop* load_addr);
   static oopDesc* load_reference_barrier_weak(oopDesc* src, oop* load_addr);
+  static oopDesc* load_reference_barrier_weak_narrow(oopDesc* src, narrowOop* load_addr);
   static void shenandoah_clone_barrier(oopDesc* src);
 };

View file

@@ -74,11 +74,15 @@ private:
  // that the block has the size of 2^pow. This requires for pow to have only 5 bits (2^32) to encode
  // all possible arrays.
  //
-  //  |---------oop---------|-pow-|--chunk---|
+  //  |xx-------oop---------|-pow-|--chunk---|
  //  0                     49    54       64
  //
  // By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1.
  //
+  // Lower bits of oop are reserved to handle the "skip_live" and "weak" properties. Since this
+  // encoding stores uncompressed oops, those bits are always available. These bits default to zero
+  // for "skip_live" and "weak". This aligns with their frequent values: strong/counted-live references.
+  //
  // This encoding gives a few interesting benefits:
  //
  // a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task:
@@ -145,7 +149,9 @@ private:
  static const uint8_t pow_shift   = oop_bits;
  static const uint8_t chunk_shift = oop_bits + pow_bits;

-  static const uintptr_t oop_extract_mask       = right_n_bits(oop_bits);
+  static const uintptr_t oop_extract_mask       = right_n_bits(oop_bits) - 3;
+  static const uintptr_t skip_live_extract_mask = 1 << 0;
+  static const uintptr_t weak_extract_mask      = 1 << 1;
  static const uintptr_t chunk_pow_extract_mask = ~right_n_bits(oop_bits);

  static const int chunk_range_mask = right_n_bits(chunk_bits);
@@ -169,9 +175,24 @@ private:
    return (int) ((val >> pow_shift) & pow_range_mask);
  }

-  inline uintptr_t encode_oop(oop obj) const {
+  inline bool decode_weak(uintptr_t val) const {
+    return (val & weak_extract_mask) != 0;
+  }
+
+  inline bool decode_cnt_live(uintptr_t val) const {
+    return (val & skip_live_extract_mask) == 0;
+  }
+
+  inline uintptr_t encode_oop(oop obj, bool skip_live, bool weak) const {
    STATIC_ASSERT(oop_shift == 0);
-    return cast_from_oop<uintptr_t>(obj);
+    uintptr_t encoded = cast_from_oop<uintptr_t>(obj);
+    if (skip_live) {
+      encoded |= skip_live_extract_mask;
+    }
+    if (weak) {
+      encoded |= weak_extract_mask;
+    }
+    return encoded;
  }

  inline uintptr_t encode_chunk(int chunk) const {
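
To see the low-bit arithmetic in isolation, here is a self-contained model of the encode_oop()/decode_weak()/decode_cnt_live() scheme from the hunk above. It is a sketch under stated assumptions: a raw uintptr_t stands in for oop, only the two flag bits are modeled (chunk/pow packing and oop_extract_mask are left out), and the 8-byte alignment that frees the low bits is asserted rather than guaranteed by an allocator.

#include <cassert>
#include <cstdint>

// Illustrative model only: the real task stores an uncompressed oop whose
// low bits are free because objects are at least 8-byte aligned.
static const uintptr_t skip_live_mask = 1 << 0;
static const uintptr_t weak_mask      = 1 << 1;

static uintptr_t encode(uintptr_t obj, bool skip_live, bool weak) {
  assert((obj & 3) == 0 && "low bits must be free (8-byte alignment)");
  if (skip_live) obj |= skip_live_mask;
  if (weak)      obj |= weak_mask;
  return obj;
}

// Decoding masks the flag bits back out (the VM additionally masks off the
// chunk/pow upper bits, which this model does not carry).
static uintptr_t decode_obj(uintptr_t v)      { return v & ~(skip_live_mask | weak_mask); }
static bool decode_weak(uintptr_t v)          { return (v & weak_mask) != 0; }
static bool decode_cnt_live(uintptr_t v)      { return (v & skip_live_mask) == 0; }

int main() {
  uintptr_t enc = encode(0x1000, /* skip_live */ true, /* weak */ false);
  assert(decode_obj(enc) == 0x1000);
  assert(!decode_weak(enc));
  assert(!decode_cnt_live(enc)); // skip_live set, so liveness is not counted
  return 0;
}

Zero in both bits is deliberately the common case (strong, counted-live references), so the default-constructed task costs nothing extra to encode.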
@@ -183,19 +204,23 @@ private:
  }

public:
-  ShenandoahMarkTask(oop o = NULL) {
-    uintptr_t enc = encode_oop(o);
+  ShenandoahMarkTask(oop o = NULL, bool skip_live = false, bool weak = false) {
+    uintptr_t enc = encode_oop(o, skip_live, weak);
    assert(decode_oop(enc) == o, "oop encoding should work: " PTR_FORMAT, p2i(o));
+    assert(decode_cnt_live(enc) == !skip_live, "skip_live encoding should work");
+    assert(decode_weak(enc) == weak, "weak encoding should work");
    assert(decode_not_chunked(enc), "task should not be chunked");
    _obj = enc;
  }

-  ShenandoahMarkTask(oop o, int chunk, int pow) {
-    uintptr_t enc_oop = encode_oop(o);
+  ShenandoahMarkTask(oop o, bool skip_live, bool weak, int chunk, int pow) {
+    uintptr_t enc_oop = encode_oop(o, skip_live, weak);
    uintptr_t enc_chunk = encode_chunk(chunk);
    uintptr_t enc_pow = encode_pow(pow);
    uintptr_t enc = enc_oop | enc_chunk | enc_pow;
    assert(decode_oop(enc) == o, "oop encoding should work: " PTR_FORMAT, p2i(o));
+    assert(decode_cnt_live(enc) == !skip_live, "skip_live should be true for chunked tasks");
+    assert(decode_weak(enc) == weak, "weak encoding should work");
    assert(decode_chunk(enc) == chunk, "chunk encoding should work: %d", chunk);
    assert(decode_pow(enc) == pow, "pow encoding should work: %d", pow);
    assert(!decode_not_chunked(enc), "task should be chunked");
@@ -210,6 +235,8 @@ public:
  inline int pow() const { return decode_pow(_obj); }
  inline bool is_not_chunked() const { return decode_not_chunked(_obj); }
+  inline bool is_weak() const { return decode_weak(_obj); }
+  inline bool count_liveness() const { return decode_cnt_live(_obj); }

  DEBUG_ONLY(bool is_valid() const;) // Tasks to be pushed/popped must be valid.
@@ -232,12 +259,14 @@ private:
  static const int pow_max = nth_bit(pow_bits) - 1;

  oop _obj;
+  bool _skip_live;
+  bool _weak;
  int _chunk;
  int _pow;

public:
-  ShenandoahMarkTask(oop o = NULL, int chunk = 0, int pow = 0):
-    _obj(o), _chunk(chunk), _pow(pow) {
+  ShenandoahMarkTask(oop o = NULL, bool skip_live = false, bool weak = false, int chunk = 0, int pow = 0):
+    _obj(o), _skip_live(skip_live), _weak(weak), _chunk(chunk), _pow(pow) {
    assert(0 <= chunk && chunk <= chunk_max, "chunk is in range: %d", chunk);
    assert(0 <= pow && pow <= pow_max, "pow is in range: %d", pow);
  }
@@ -248,6 +277,8 @@ public:
  inline int chunk() const { return _chunk; }
  inline int pow() const { return _pow; }
  inline bool is_not_chunked() const { return _chunk == 0; }
+  inline bool is_weak() const { return _weak; }
+  inline bool count_liveness() const { return !_skip_live; }

  DEBUG_ONLY(bool is_valid() const;) // Tasks to be pushed/popped must be valid.

View file

@@ -59,9 +59,9 @@ public:
  virtual void doit();
};

-class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahReferenceOperation {
+class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahOperation {
public:
-  VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahReferenceOperation() {};
+  VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahOperation() {};
  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; }
  const char* name() const { return "Shenandoah Final Mark and Start Evacuation"; }
  virtual void doit();

View file

@@ -47,6 +47,17 @@
#undef verify_oop
#endif

+static bool is_instance_ref_klass(Klass* k) {
+  return k->is_instance_klass() && InstanceKlass::cast(k)->reference_type() != REF_NONE;
+}
+
+class ShenandoahIgnoreReferenceDiscoverer : public ReferenceDiscoverer {
+public:
+  virtual bool discover_reference(oop obj, ReferenceType type) {
+    return true;
+  }
+};
+
class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
private:
  const char* _phase;
@@ -68,7 +79,12 @@ public:
    _map(map),
    _ld(ld),
    _interior_loc(NULL),
-    _loc(NULL) { }
+    _loc(NULL) {
+    if (options._verify_marked == ShenandoahVerifier::_verify_marked_complete_except_references ||
+        options._verify_marked == ShenandoahVerifier::_verify_marked_disable) {
+      set_ref_discoverer_internal(new ShenandoahIgnoreReferenceDiscoverer());
+    }
+  }

private:
  void check(ShenandoahAsserts::SafeLevel level, oop obj, bool test, const char* label) {
@@ -82,7 +98,9 @@ private:
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
+      if (is_instance_ref_klass(obj->klass())) {
+        obj = ShenandoahForwarding::get_forwardee(obj);
+      }

      // Single threaded verification can use faster non-atomic stack and bitmap
      // methods.
      //
@@ -208,6 +226,10 @@ private:
      check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj),
            "Must be marked in complete bitmap");
      break;
+    case ShenandoahVerifier::_verify_marked_complete_except_references:
+      check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj),
+            "Must be marked in complete bitmap, except j.l.r.Reference referents");
+      break;
    default:
      assert(false, "Unhandled mark verification");
  }
@@ -526,19 +548,19 @@ public:
  virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
    size_t processed = 0;
-    MarkBitMap* mark_bit_map = _heap->complete_marking_context()->mark_bit_map();
-    HeapWord* tams = _heap->complete_marking_context()->top_at_mark_start(r);
+    ShenandoahMarkingContext* ctx = _heap->complete_marking_context();
+    HeapWord* tams = ctx->top_at_mark_start(r);

    // Bitmaps, before TAMS
    if (tams > r->bottom()) {
      HeapWord* start = r->bottom();
-      HeapWord* addr = mark_bit_map->get_next_marked_addr(start, tams);
+      HeapWord* addr = ctx->get_next_marked_addr(start, tams);

      while (addr < tams) {
        verify_and_follow(addr, stack, cl, &processed);
        addr += 1;
        if (addr < tams) {
-          addr = mark_bit_map->get_next_marked_addr(addr, tams);
+          addr = ctx->get_next_marked_addr(addr, tams);
        }
      }
    }
@@ -566,9 +588,10 @@ public:
    // Verify everything reachable from that object too, hopefully realizing
    // everything was already marked, and never touching further:
+    if (!is_instance_ref_klass(obj->klass())) {
      cl.verify_oops_from(obj);
      (*processed)++;
+    }

    while (!stack.is_empty()) {
      ShenandoahVerifierTask task = stack.pop();
      cl.verify_oops_from(task.obj());
@@ -718,7 +741,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
  // version
  size_t count_marked = 0;
-  if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete) {
+  if (ShenandoahVerifyLevel >= 4 && (marked == _verify_marked_complete || marked == _verify_marked_complete_except_references)) {
    guarantee(_heap->marking_context()->is_complete(), "Marking context should be complete");
    ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options);
    _heap->workers()->run_task(&task);
@@ -793,7 +816,7 @@ void ShenandoahVerifier::verify_after_concmark() {
  verify_at_safepoint(
          "After Mark",
          _verify_forwarded_none,      // no forwarded references
-          _verify_marked_complete,     // bitmaps as precise as we can get
+          _verify_marked_complete_except_references, // bitmaps as precise as we can get, except dangling j.l.r.Refs
          _verify_cset_none,           // no references to cset anymore
          _verify_liveness_complete,   // liveness data must be complete here
          _verify_regions_disable,     // trash regions not yet recycled
@@ -811,7 +834,7 @@ void ShenandoahVerifier::verify_before_evacuation() {
  verify_at_safepoint(
          "Before Evacuation",
          _verify_forwarded_none,      // no forwarded references
-          _verify_marked_complete,     // walk over marked objects too
+          _verify_marked_complete_except_references, // walk over marked objects too
          _verify_cset_disable,        // non-forwarded references to cset expected
          _verify_liveness_complete,   // liveness data must be complete here
          _verify_regions_disable,     // trash regions not yet recycled

View file

@@ -65,7 +65,11 @@ public:
  _verify_marked_incomplete,

  // Objects should be marked in "complete" bitmap.
-  _verify_marked_complete
+  _verify_marked_complete,
+
+  // Objects should be marked in "complete" bitmap, except j.l.r.Reference referents, which
+  // may be dangling after marking but before conc-weakrefs-processing.
+  _verify_marked_complete_except_references
} VerifyMarked;

typedef enum {

View file

@@ -32,6 +32,7 @@ uint ShenandoahWorkerPolicy::_prev_par_marking = 0;
uint ShenandoahWorkerPolicy::_prev_conc_marking = 0;
uint ShenandoahWorkerPolicy::_prev_conc_evac = 0;
uint ShenandoahWorkerPolicy::_prev_conc_root_proc = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_refs_proc = 0;
uint ShenandoahWorkerPolicy::_prev_fullgc = 0;
uint ShenandoahWorkerPolicy::_prev_degengc = 0;
uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0;
@@ -63,6 +64,16 @@ uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() {
  return _prev_par_marking;
}

+// Calculate workers for concurrent refs processing
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing() {
+  uint active_workers = (_prev_conc_refs_proc == 0) ? ConcGCThreads : _prev_conc_refs_proc;
+  _prev_conc_refs_proc =
+    WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                           active_workers,
+                                           Threads::number_of_non_daemon_threads());
+  return _prev_conc_refs_proc;
+}
+
// Calculate workers for concurrent root processing
uint ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing() {
  uint active_workers = (_prev_conc_root_proc == 0) ? ConcGCThreads : _prev_conc_root_proc;
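
The new helper repeats the feedback idiom used by its siblings: seed with ConcGCThreads on the first cycle, then feed the previous cycle's result back in as the active-worker hint so the gang size adapts gradually. A standalone model of that loop follows; calc_active_conc_workers() below is a simplified stand-in for WorkerPolicy's real heuristic (which also weighs other signals), not the actual implementation.

#include <algorithm>
#include <cstdio>

// Stand-in for WorkerPolicy::calc_active_conc_workers(): clamp a demand-based
// guess between 1 and the configured maximum. Illustrative only.
static unsigned calc_active_conc_workers(unsigned max_workers,
                                         unsigned active_hint,
                                         unsigned app_threads) {
  unsigned wanted = std::max(active_hint, app_threads / 4);
  return std::min(std::max(wanted, 1u), max_workers);
}

static unsigned prev_conc_refs_proc = 0; // remembered across GC cycles

static unsigned calc_workers_for_conc_refs_processing(unsigned conc_gc_threads,
                                                      unsigned app_threads) {
  // First cycle: seed with the configured thread count; afterwards reuse the
  // previous answer as the hint, mirroring the pattern in the hunk above.
  unsigned active = (prev_conc_refs_proc == 0) ? conc_gc_threads : prev_conc_refs_proc;
  prev_conc_refs_proc = calc_active_conc_workers(conc_gc_threads, active, app_threads);
  return prev_conc_refs_proc;
}

int main() {
  for (int cycle = 0; cycle < 3; cycle++) {
    std::printf("cycle %d: %u workers\n", cycle,
                calc_workers_for_conc_refs_processing(4, 16));
  }
  return 0;
}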
@@ -123,11 +134,6 @@ uint ShenandoahWorkerPolicy::calc_workers_for_final_update_ref() {
  return _prev_par_update_ref;
}

-uint ShenandoahWorkerPolicy::calc_workers_for_conc_preclean() {
-  // Precleaning is single-threaded
-  return 1;
-}
-
uint ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup() {
  uint active_workers = (_prev_conc_cleanup == 0) ? ConcGCThreads : _prev_conc_cleanup;
  _prev_conc_cleanup =

View file

@@ -32,6 +32,7 @@ private:
  static uint _prev_par_marking;
  static uint _prev_conc_marking;
  static uint _prev_conc_root_proc;
+  static uint _prev_conc_refs_proc;
  static uint _prev_conc_evac;
  static uint _prev_fullgc;
  static uint _prev_degengc;
@@ -53,6 +54,9 @@ public:
  // Calculate workers for concurrent root processing
  static uint calc_workers_for_conc_root_processing();

+  // Calculate workers for concurrent refs processing
+  static uint calc_workers_for_conc_refs_processing();
+
  // Calculate workers for concurrent evacuation (concurrent GC)
  static uint calc_workers_for_conc_evac();
@@ -68,9 +72,6 @@ public:
  // Calculate workers for parallel/final reference update
  static uint calc_workers_for_final_update_ref();

-  // Calculate workers for concurrent precleaning
-  static uint calc_workers_for_conc_preclean();
-
  // Calculate workers for concurrent cleanup
  static uint calc_workers_for_conc_cleanup();

View file

@@ -76,13 +76,6 @@
          " compact - run GC more frequently and with deeper targets to "   \
          "free up more memory.")                                           \
                                                                            \
-  product(uintx, ShenandoahRefProcFrequency, 5, EXPERIMENTAL,              \
-          "Process process weak (soft, phantom, finalizers) references "   \
-          "every Nth cycle. Normally affects concurrent GC cycles only, "  \
-          "as degenerated and full GCs would try to process references "   \
-          "regardless. Set to zero to disable reference processing "       \
-          "completely.")                                                   \
-                                                                           \
  product(uintx, ShenandoahUnloadClassesFrequency, 1, EXPERIMENTAL,         \
          "Unload the classes every Nth cycle. Normally affects concurrent "\
          "GC cycles, as degenerated and full GCs would try to unload "     \
@@ -313,11 +306,6 @@
          "Forcefully flush non-empty SATB buffers at this interval. "      \
          "Time is in milliseconds.")                                       \
                                                                            \
-  product(bool, ShenandoahPreclean, true, DIAGNOSTIC,                      \
-          "Do concurrent preclean phase before final mark: process "       \
-          "definitely alive references to avoid dealing with them during " \
-          "pause.")                                                        \
-                                                                           \
  product(bool, ShenandoahSuspendibleWorkers, false, EXPERIMENTAL,          \
          "Suspend concurrent GC worker threads at safepoints")             \
                                                                            \

View file

@@ -28,6 +28,7 @@ package gc;
 * @key randomness
 * @summary Tests that all SoftReferences has been cleared at time of OOM.
 * @requires vm.gc != "Z"
+ * @requires vm.gc != "Shenandoah"
 * @library /test/lib
 * @modules java.base/jdk.internal.misc
 * @run main/othervm -Xmx128m gc.TestSoftReferencesBehaviorOnOOME 512 2k