8296875: Generational ZGC: Refactor loom code

Co-authored-by: Stefan Karlsson <stefank@openjdk.org>
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Reviewed-by: stefank, rrich, pchilanomate
Erik Österlund 2022-11-30 14:08:57 +00:00
parent 301cf52fa2
commit be99e84c98
42 changed files with 752 additions and 339 deletions


@@ -998,7 +998,6 @@ static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots)
   __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

   OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize)/ VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
-  ContinuationEntry::setup_oopmap(map);

   __ ldr(rscratch1, Address(rthread, JavaThread::cont_entry_offset()));
   __ str(rscratch1, Address(sp, ContinuationEntry::parent_offset()));


@@ -1657,7 +1657,6 @@ static OopMap* continuation_enter_setup(MacroAssembler* masm, int& framesize_wor
   __ push_frame(frame_size_in_bytes , R0); // SP -= frame_size_in_bytes

   OopMap* map = new OopMap((int)frame_size_in_bytes / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
-  ContinuationEntry::setup_oopmap(map);

   __ ld_ptr(R0, JavaThread::cont_entry_offset(), R16_thread);
   __ st_ptr(R1_SP, JavaThread::cont_entry_offset(), R16_thread);


@@ -873,7 +873,6 @@ static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots)
   __ sub(sp, sp, (int)ContinuationEntry::size()); // place Continuation metadata

   OopMap* map = new OopMap(((int)ContinuationEntry::size() + wordSize) / VMRegImpl::stack_slot_size, 0 /* arg_slots*/);
-  ContinuationEntry::setup_oopmap(map);

   __ ld(t0, Address(xthread, JavaThread::cont_entry_offset()));
   __ sd(t0, Address(sp, ContinuationEntry::parent_offset()));


@@ -1293,7 +1293,6 @@ static OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots)
   int frame_size = (checked_cast<int>(ContinuationEntry::size()) + wordSize) / VMRegImpl::stack_slot_size;
   OopMap* map = new OopMap(frame_size, 0);
-  ContinuationEntry::setup_oopmap(map);

   __ movptr(rax, Address(r15_thread, JavaThread::cont_entry_offset()));
   __ movptr(Address(rsp, ContinuationEntry::parent_offset()), rax);


@@ -1909,7 +1909,7 @@ oop java_lang_Thread::async_get_stack_trace(oop java_thread, TRAPS) {
     if (java_lang_VirtualThread::is_instance(_java_thread())) {
       // if (thread->vthread() != _java_thread()) // We might be inside a System.executeOnCarrierThread
       const ContinuationEntry* ce = thread->vthread_continuation();
-      if (ce == nullptr || ce->cont_oop() != java_lang_VirtualThread::continuation(_java_thread())) {
+      if (ce == nullptr || ce->cont_oop(thread) != java_lang_VirtualThread::continuation(_java_thread())) {
         return; // not mounted
       }
     } else {


@@ -40,6 +40,7 @@ EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
           make_barrier_set_c1<BarrierSetC1>(),
           make_barrier_set_c2<BarrierSetC2>(),
           NULL /* barrier_set_nmethod */,
+          NULL /* barrier_set_stack_chunk */,
           BarrierSet::FakeRtti(BarrierSet::EpsilonBarrierSet)) {}

 void EpsilonBarrierSet::on_thread_create(Thread *thread) {


@@ -26,6 +26,7 @@
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/barrierSetStackChunk.hpp"
 #include "runtime/continuation.hpp"
 #include "runtime/javaThread.hpp"
 #include "utilities/debug.hpp"
@@ -62,16 +63,26 @@ static BarrierSetNMethod* select_barrier_set_nmethod(BarrierSetNMethod* barrier_
   }
 }

+static BarrierSetStackChunk* select_barrier_set_stack_chunk(BarrierSetStackChunk* barrier_set_stack_chunk) {
+  if (barrier_set_stack_chunk != NULL) {
+    return barrier_set_stack_chunk;
+  } else {
+    return new BarrierSetStackChunk();
+  }
+}
+
 BarrierSet::BarrierSet(BarrierSetAssembler* barrier_set_assembler,
                        BarrierSetC1* barrier_set_c1,
                        BarrierSetC2* barrier_set_c2,
                        BarrierSetNMethod* barrier_set_nmethod,
+                       BarrierSetStackChunk* barrier_set_stack_chunk,
                        const FakeRtti& fake_rtti) :
     _fake_rtti(fake_rtti),
     _barrier_set_assembler(barrier_set_assembler),
     _barrier_set_c1(barrier_set_c1),
     _barrier_set_c2(barrier_set_c2),
-    _barrier_set_nmethod(select_barrier_set_nmethod(barrier_set_nmethod)) {
+    _barrier_set_nmethod(select_barrier_set_nmethod(barrier_set_nmethod)),
+    _barrier_set_stack_chunk(select_barrier_set_stack_chunk(barrier_set_stack_chunk)) {
 }

 void BarrierSet::on_thread_attach(Thread* thread) {


@@ -37,6 +37,7 @@ class BarrierSetAssembler;
 class BarrierSetC1;
 class BarrierSetC2;
 class BarrierSetNMethod;
+class BarrierSetStackChunk;
 class JavaThread;

 // This class provides the interface between a barrier implementation and
@@ -74,6 +75,7 @@ private:
   BarrierSetC1* _barrier_set_c1;
   BarrierSetC2* _barrier_set_c2;
   BarrierSetNMethod* _barrier_set_nmethod;
+  BarrierSetStackChunk* _barrier_set_stack_chunk;

 public:
   // Metafunction mapping a class derived from BarrierSet to the
@@ -98,6 +100,7 @@ protected:
              BarrierSetC1* barrier_set_c1,
              BarrierSetC2* barrier_set_c2,
              BarrierSetNMethod* barrier_set_nmethod,
+             BarrierSetStackChunk* barrier_set_stack_chunk,
              const FakeRtti& fake_rtti);

   ~BarrierSet() { }
@@ -165,6 +168,11 @@ public:
     return _barrier_set_nmethod;
   }

+  BarrierSetStackChunk* barrier_set_stack_chunk() {
+    assert(_barrier_set_stack_chunk != NULL, "should be set");
+    return _barrier_set_stack_chunk;
+  }
+
   // The AccessBarrier of a BarrierSet subclass is called by the Access API
   // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
   // may override these default access operations by declaring an
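For orientation (not part of the patch): callers reach the new stack-chunk barrier through the global BarrierSet, mirroring how the nmethod barrier is accessed. A minimal sketch of the dispatch used by later hunks in this commit, with chunk, addr and iterator assumed to be in scope:

// Sketch only: how the new accessor is used further down in this commit.
// 'chunk', 'addr' and 'iterator' are assumed locals, not patch code.
BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
oop obj = bs_chunk->load_oop(chunk, addr);      // GC-aware load of an oop stored in a stack chunk
bs_chunk->encode_gc_mode(chunk, &iterator);     // let the GC transform the chunk's oops for GC mode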


@@ -0,0 +1,101 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "memory/iterator.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
class UncompressOopsOopClosure : public OopClosure {
public:
void do_oop(oop* p) override {
assert(UseCompressedOops, "Only needed with compressed oops");
oop obj = CompressedOops::decode(*(narrowOop*)p);
assert(obj == nullptr || dbg_is_good_oop(obj), "p: " INTPTR_FORMAT " obj: " INTPTR_FORMAT, p2i(p), p2i((oopDesc*)obj));
*p = obj;
}
void do_oop(narrowOop* p) override {}
};
class CompressOopsOopClosure : public OopClosure {
stackChunkOop _chunk;
BitMapView _bm;
void convert_oop_to_narrowOop(oop* p) {
oop obj = *p;
*p = nullptr;
*(narrowOop*)p = CompressedOops::encode(obj);
}
template <typename T>
void do_oop_work(T* p) {
BitMap::idx_t index = _chunk->bit_index_for(p);
assert(!_bm.at(index), "must not be set already");
_bm.set_bit(index);
}
public:
CompressOopsOopClosure(stackChunkOop chunk)
: _chunk(chunk), _bm(chunk->bitmap()) {}
virtual void do_oop(oop* p) override {
if (UseCompressedOops) {
// Convert all oops to narrow before marking the oop in the bitmap.
convert_oop_to_narrowOop(p);
do_oop_work((narrowOop*)p);
} else {
do_oop_work(p);
}
}
virtual void do_oop(narrowOop* p) override {
do_oop_work(p);
}
};
void BarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) {
CompressOopsOopClosure cl(chunk);
iterator->oops_do(&cl);
}
void BarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) {
if (chunk->has_bitmap() && UseCompressedOops) {
UncompressOopsOopClosure cl;
iterator->oops_do(&cl);
}
}
oop BarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) {
return RawAccess<>::oop_load(addr);
}
oop BarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) {
return RawAccess<>::oop_load(addr);
}


@@ -0,0 +1,44 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_BARRIERSETSTACKCHUNK_HPP
#define SHARE_GC_SHARED_BARRIERSETSTACKCHUNK_HPP

#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"

class OopClosure;

class BarrierSetStackChunk: public CHeapObj<mtGC> {
public:
  virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator);
  virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator);

  virtual oop load_oop(stackChunkOop chunk, oop* addr);
  virtual oop load_oop(stackChunkOop chunk, narrowOop* addr);
};

#endif // SHARE_GC_SHARED_BARRIERSETSTACKCHUNK_HPP
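A collector that wants different semantics overrides these virtuals; the Shenandoah and ZGC files added later in this commit follow exactly this shape. A minimal sketch, assuming a hypothetical collector class MyGCBarrierSetStackChunk (illustrative only, not part of the patch):

// Hypothetical subclass sketch; MyGCBarrierSetStackChunk is not part of the commit.
class MyGCBarrierSetStackChunk : public BarrierSetStackChunk {
public:
  // Leave frozen oops untouched; this GC can read them in place.
  virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) override { /* nothing to do */ }
  virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) override { /* nothing to do */ }

  // Apply the GC's load barrier when reading an oop out of a stack chunk.
  virtual oop load_oop(stackChunkOop chunk, oop* addr) override;
  virtual oop load_oop(stackChunkOop chunk, narrowOop* addr) override;
};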


@@ -253,7 +253,7 @@ void MemAllocator::Allocation::notify_allocation(JavaThread* thread) {
   notify_allocation_jvmti_sampler();
 }

-HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
+HeapWord* MemAllocator::mem_allocate_outside_tlab(Allocation& allocation) const {
   allocation._allocated_outside_tlab = true;
   HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
   if (mem == NULL) {
@@ -267,24 +267,24 @@ HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
   return mem;
 }

-HeapWord* MemAllocator::allocate_inside_tlab(Allocation& allocation) const {
+HeapWord* MemAllocator::mem_allocate_inside_tlab(Allocation& allocation) const {
   assert(UseTLAB, "should use UseTLAB");

   // Try allocating from an existing TLAB.
-  HeapWord* mem = allocate_inside_tlab_fast();
+  HeapWord* mem = mem_allocate_inside_tlab_fast();
   if (mem != NULL) {
     return mem;
   }

   // Try refilling the TLAB and allocating the object in it.
-  return allocate_inside_tlab_slow(allocation);
+  return mem_allocate_inside_tlab_slow(allocation);
 }

-HeapWord* MemAllocator::allocate_inside_tlab_fast() const {
+HeapWord* MemAllocator::mem_allocate_inside_tlab_fast() const {
   return _thread->tlab().allocate(_word_size);
 }

-HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const {
+HeapWord* MemAllocator::mem_allocate_inside_tlab_slow(Allocation& allocation) const {
   HeapWord* mem = NULL;
   ThreadLocalAllocBuffer& tlab = _thread->tlab();
@@ -351,15 +351,32 @@ HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const
   return mem;
 }

-HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
+HeapWord* MemAllocator::mem_allocate_slow(Allocation& allocation) const {
+  // Allocation of an oop can always invoke a safepoint.
+  debug_only(JavaThread::cast(_thread)->check_for_valid_safepoint_state());
+
   if (UseTLAB) {
-    HeapWord* result = allocate_inside_tlab(allocation);
-    if (result != NULL) {
-      return result;
+    // Try refilling the TLAB and allocating the object in it.
+    HeapWord* mem = mem_allocate_inside_tlab_slow(allocation);
+    if (mem != NULL) {
+      return mem;
     }
   }

-  return allocate_outside_tlab(allocation);
+  return mem_allocate_outside_tlab(allocation);
+}
+
+HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
+  if (UseTLAB) {
+    // Try allocating from an existing TLAB.
+    HeapWord* mem = mem_allocate_inside_tlab_fast();
+    if (mem != NULL) {
+      return mem;
+    }
+  }
+
+  return mem_allocate_slow(allocation);
 }

 oop MemAllocator::allocate() const {
@@ -378,21 +395,6 @@ oop MemAllocator::allocate() const {
   return obj;
 }

-oop MemAllocator::try_allocate_in_existing_tlab() {
-  oop obj = NULL;
-  if (UseTLAB) {
-    HeapWord* mem = allocate_inside_tlab_fast();
-    if (mem != NULL) {
-      obj = initialize(mem);
-    } else {
-      // The unhandled oop detector will poison local variable obj,
-      // so reset it to NULL if mem is NULL.
-      obj = NULL;
-    }
-  }
-  return obj;
-}
-
 void MemAllocator::mem_clear(HeapWord* mem) const {
   assert(mem != NULL, "cannot initialize NULL object");
   const size_t hs = oopDesc::header_size();
@@ -447,20 +449,3 @@ oop ClassAllocator::initialize(HeapWord* mem) const {
   java_lang_Class::set_oop_size(mem, _word_size);
   return finish(mem);
 }
-
-// Does the minimal amount of initialization needed for a TLAB allocation.
-// We don't need to do a full initialization, as such an allocation need not be immediately walkable.
-oop StackChunkAllocator::initialize(HeapWord* mem) const {
-  assert(_stack_size > 0, "");
-  assert(_stack_size <= max_jint, "");
-  assert(_word_size > _stack_size, "");
-
-  // zero out fields (but not the stack)
-  const size_t hs = oopDesc::header_size();
-  Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
-
-  jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
-  jdk_internal_vm_StackChunk::set_sp(mem, (int)_stack_size);
-  return finish(mem);
-}


@@ -42,12 +42,19 @@ protected:
   Klass* const _klass;
   const size_t _word_size;

+  // Allocate from the current thread's TLAB, without taking a new TLAB (no safepoint).
+  HeapWord* mem_allocate_inside_tlab_fast() const;
+
 private:
-  // Allocate from the current thread's TLAB, with broken-out slow path.
-  HeapWord* allocate_inside_tlab(Allocation& allocation) const;
-  HeapWord* allocate_inside_tlab_fast() const;
-  HeapWord* allocate_inside_tlab_slow(Allocation& allocation) const;
-  HeapWord* allocate_outside_tlab(Allocation& allocation) const;
+  // Allocate in a TLAB. Could allocate a new TLAB, and therefore potentially safepoint.
+  HeapWord* mem_allocate_inside_tlab(Allocation& allocation) const;
+  HeapWord* mem_allocate_inside_tlab_slow(Allocation& allocation) const;
+
+  // Allocate outside a TLAB. Could safepoint.
+  HeapWord* mem_allocate_outside_tlab(Allocation& allocation) const;
+
+  // Fast-path TLAB allocation failed. Takes a slow-path and potentially safepoint.
+  HeapWord* mem_allocate_slow(Allocation& allocation) const;

 protected:
   MemAllocator(Klass* klass, size_t word_size, Thread* thread)
@@ -78,13 +85,13 @@ protected:
 public:
   // Allocate and fully construct the object, and perform various instrumentation. Could safepoint.
   oop allocate() const;
-  oop try_allocate_in_existing_tlab();
 };

 class ObjAllocator: public MemAllocator {
 public:
   ObjAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
     : MemAllocator(klass, word_size, thread) {}

   virtual oop initialize(HeapWord* mem) const;
 };
@@ -101,6 +108,7 @@ public:
     : MemAllocator(klass, word_size, thread),
       _length(length),
       _do_zero(do_zero) {}

   virtual oop initialize(HeapWord* mem) const;
 };
@@ -108,16 +116,7 @@ class ClassAllocator: public MemAllocator {
 public:
   ClassAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
     : MemAllocator(klass, word_size, thread) {}
-
-  virtual oop initialize(HeapWord* mem) const;
-};
-
-class StackChunkAllocator : public MemAllocator {
-  const size_t _stack_size;
-
-public:
-  StackChunkAllocator(Klass* klass, size_t word_size, size_t stack_size, Thread* thread = Thread::current())
-    : MemAllocator(klass, word_size, thread),
-      _stack_size(stack_size) {}

   virtual oop initialize(HeapWord* mem) const;
 };
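For context, an assumption about the intent rather than something stated in these hunks: making mem_allocate_inside_tlab_fast() a protected member lets an allocator subclass defined outside this file attempt the no-safepoint TLAB path itself, roughly as sketched below. MyChunkAllocator is purely illustrative and not part of the patch.

// Hypothetical subclass sketch; MyChunkAllocator does not exist in the commit.
class MyChunkAllocator : public MemAllocator {
public:
  MyChunkAllocator(Klass* klass, size_t word_size, Thread* thread)
    : MemAllocator(klass, word_size, thread) {}

  // Attempt a TLAB-only allocation; returns NULL instead of safepointing.
  HeapWord* try_allocate_raw_fast() {
    return mem_allocate_inside_tlab_fast();
  }

  virtual oop initialize(HeapWord* mem) const override;  // object-specific header/field setup
};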


@@ -40,6 +40,7 @@ protected:
                      barrier_set_c1,
                      barrier_set_c2,
                      NULL /* barrier_set_nmethod */,
+                     NULL /* barrier_set_stack_chunk */,
                      fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }


@@ -27,6 +27,7 @@
 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
@@ -45,6 +46,7 @@ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
              make_barrier_set_c1<ShenandoahBarrierSetC1>(),
              make_barrier_set_c2<ShenandoahBarrierSetC2>(),
              ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
+             new ShenandoahBarrierSetStackChunk(),
              BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
   _heap(heap),
   _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),


@@ -0,0 +1,45 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
void ShenandoahBarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) {
// Nothing to do
}
void ShenandoahBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) {
// Nothing to do
}
oop ShenandoahBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) {
oop result = BarrierSetStackChunk::load_oop(chunk, addr);
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result);
}
oop ShenandoahBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) {
oop result = BarrierSetStackChunk::load_oop(chunk, addr);
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result);
}


@@ -0,0 +1,39 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETSTACKCHUNK_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETSTACKCHUNK_HPP

#include "gc/shared/barrierSetStackChunk.hpp"

class ShenandoahBarrierSetStackChunk : public BarrierSetStackChunk {
public:
  virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) override;
  virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* oop_iterator) override;

  virtual oop load_oop(stackChunkOop chunk, oop* addr) override;
  virtual oop load_oop(stackChunkOop chunk, narrowOop* addr) override;
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETSTACKCHUNK_HPP


@@ -25,6 +25,7 @@
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zBarrierSetNMethod.hpp"
+#include "gc/z/zBarrierSetStackChunk.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zStackWatermark.hpp"
@@ -46,6 +47,7 @@ ZBarrierSet::ZBarrierSet() :
               make_barrier_set_c1<ZBarrierSetC1>(),
               make_barrier_set_c2<ZBarrierSetC2>(),
               new ZBarrierSetNMethod(),
+              new ZBarrierSetStackChunk(),
               BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}

 ZBarrierSetAssembler* ZBarrierSet::assembler() {


@@ -0,0 +1,47 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSetStackChunk.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"
void ZBarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) {
// Do nothing
}
void ZBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) {
// Do nothing
}
oop ZBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) {
oop obj = Atomic::load(addr);
return ZBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)NULL, obj);
}
oop ZBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) {
ShouldNotReachHere();
return NULL;
}


@@ -0,0 +1,44 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_Z_ZBARRIERSETSTACKCHUNK_HPP
#define SHARE_GC_Z_ZBARRIERSETSTACKCHUNK_HPP

#include "gc/shared/barrierSetStackChunk.hpp"
#include "memory/iterator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"

class OopClosure;

class ZBarrierSetStackChunk : public BarrierSetStackChunk {
public:
  virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override;
  virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override;

  virtual oop load_oop(stackChunkOop chunk, oop* addr) override;
  virtual oop load_oop(stackChunkOop chunk, narrowOop* addr) override;
};

#endif // SHARE_GC_Z_ZBARRIERSETSTACKCHUNK_HPP


@@ -123,6 +123,12 @@ public:
   virtual void do_nmethod(nmethod* nm) { ShouldNotReachHere(); }
 };

+// Interface for applying an OopClosure to a set of oops.
+class OopIterator {
+public:
+  virtual void oops_do(OopClosure* cl) = 0;
+};
+
 enum class derived_pointer : intptr_t;

 class DerivedOopClosure : public Closure {
 public:
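As a minimal illustration of the new interface (not from the patch), an iterator over a plain oop array might look like the sketch below; the commit's real implementor is the FrameOopIterator added in the stackChunkOop.cpp hunks further down.

// Hypothetical OopIterator implementation; ArrayOopIterator is not part of the commit.
class ArrayOopIterator : public OopIterator {
  oop* _begin;
  oop* _end;
public:
  ArrayOopIterator(oop* begin, oop* end) : _begin(begin), _end(end) {}
  virtual void oops_do(OopClosure* cl) override {
    for (oop* p = _begin; p < _end; p++) {
      cl->do_oop(p);  // visit each oop slot in turn
    }
  }
};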


@@ -125,6 +125,7 @@ public:
   static inline size_t bitmap_size_in_bits(size_t stack_size_in_words); // In bits
   static inline size_t bitmap_size(size_t stack_size_in_words); // In words
+  static inline size_t gc_data_size(size_t stack_size_in_words); // In words

   // Returns the size of the instance including the stack data.
   virtual size_t oop_size(oop obj) const override;


@@ -38,7 +38,7 @@
 #include "utilities/macros.hpp"

 inline size_t InstanceStackChunkKlass::instance_size(size_t stack_size_in_words) const {
-  return align_object_size(size_helper() + stack_size_in_words + bitmap_size(stack_size_in_words));
+  return align_object_size(size_helper() + stack_size_in_words + gc_data_size(stack_size_in_words));
 }

 inline size_t InstanceStackChunkKlass::bitmap_size_in_bits(size_t stack_size_in_words) {
@@ -48,6 +48,11 @@ inline size_t InstanceStackChunkKlass::bitmap_size_in_bits(size_t stack_size_in_
   return align_up(size_in_bits, BitsPerWord);
 }

+inline size_t InstanceStackChunkKlass::gc_data_size(size_t stack_size_in_words) {
+  // At the moment all GCs are okay with GC data big enough to fit a bit map
+  return bitmap_size(stack_size_in_words);
+}
+
 inline size_t InstanceStackChunkKlass::bitmap_size(size_t stack_size_in_words) {
   return bitmap_size_in_bits(stack_size_in_words) >> LogBitsPerWord;
 }


@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "code/compiledMethod.hpp"
 #include "code/scopeDesc.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetStackChunk.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/memRegion.hpp"
@@ -36,6 +38,28 @@
 #include "runtime/smallRegisterMap.inline.hpp"
 #include "runtime/stackChunkFrameStream.inline.hpp"

+template <typename RegisterMapT>
+class FrameOopIterator : public OopIterator {
+private:
+  const frame& _f;
+  const RegisterMapT* _map;
+
+public:
+  FrameOopIterator(const frame& f, const RegisterMapT* map)
+    : _f(f),
+      _map(map) {
+  }
+
+  virtual void oops_do(OopClosure* cl) override {
+    if (_f.is_interpreted_frame()) {
+      _f.oops_interpreted_do(cl, nullptr);
+    } else {
+      OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
+      visitor.oops_do(&_f, _map, _f.oop_map());
+    }
+  }
+};
+
 frame stackChunkOopDesc::top_frame(RegisterMap* map) {
   assert(!is_empty(), "");
   StackChunkFrameStream<ChunkFrames::Mixed> fs(this);
@@ -172,16 +196,25 @@ public:
 };

 template <typename DerivedPointerClosureType>
-class FrameToDerivedPointerClosure {
+class EncodeGCModeConcurrentFrameClosure {
+  stackChunkOop _chunk;
   DerivedPointerClosureType* _cl;

 public:
-  FrameToDerivedPointerClosure(DerivedPointerClosureType* cl)
-    : _cl(cl) {}
+  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
+    : _chunk(chunk),
+      _cl(cl) {
+  }

   template <ChunkFrames frame_kind, typename RegisterMapT>
   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
     f.iterate_derived_pointers(_cl, map);
+
+    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
+    frame fr = f.to_frame();
+    FrameOopIterator<RegisterMapT> iterator(fr, map);
+    bs_chunk->encode_gc_mode(_chunk, &iterator);
+
     return true;
   }
 };
@@ -256,52 +289,12 @@ void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
   }

   DerivedPointersSupport::RelativizeClosure derived_cl;
-  FrameToDerivedPointerClosure<decltype(derived_cl)> frame_cl(&derived_cl);
+  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
   iterate_stack(&frame_cl);

   release_relativization();
 }

-enum class OopKind { Narrow, Wide };
-
-template <OopKind kind>
-class CompressOopsAndBuildBitmapOopClosure : public OopClosure {
-  stackChunkOop _chunk;
-  BitMapView _bm;
-
-  void convert_oop_to_narrowOop(oop* p) {
-    oop obj = *p;
-    *p = nullptr;
-    *(narrowOop*)p = CompressedOops::encode(obj);
-  }
-
-  template <typename T>
-  void do_oop_work(T* p) {
-    BitMap::idx_t index = _chunk->bit_index_for(p);
-    assert(!_bm.at(index), "must not be set already");
-    _bm.set_bit(index);
-  }
-
-public:
-  CompressOopsAndBuildBitmapOopClosure(stackChunkOop chunk)
-    : _chunk(chunk), _bm(chunk->bitmap()) {}
-
-  virtual void do_oop(oop* p) override {
-    if (kind == OopKind::Narrow) {
-      // Convert all oops to narrow before marking the oop in the bitmap.
-      convert_oop_to_narrowOop(p);
-      do_oop_work((narrowOop*)p);
-    } else {
-      do_oop_work(p);
-    }
-  }
-
-  virtual void do_oop(narrowOop* p) override {
-    do_oop_work(p);
-  }
-};
-
-template <OopKind kind>
 class TransformStackChunkClosure {
   stackChunkOop _chunk;
@@ -313,8 +306,10 @@ public:
     DerivedPointersSupport::RelativizeClosure derived_cl;
     f.iterate_derived_pointers(&derived_cl, map);

-    CompressOopsAndBuildBitmapOopClosure<kind> cl(_chunk);
-    f.iterate_oops(&cl, map);
+    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
+    frame fr = f.to_frame();
+    FrameOopIterator<RegisterMapT> iterator(fr, map);
+    bs_chunk->encode_gc_mode(_chunk, &iterator);

     return true;
   }
@@ -328,13 +323,8 @@ void stackChunkOopDesc::transform() {
   set_has_bitmap(true);
   bitmap().clear();

-  if (UseCompressedOops) {
-    TransformStackChunkClosure<OopKind::Narrow> closure(this);
-    iterate_stack(&closure);
-  } else {
-    TransformStackChunkClosure<OopKind::Wide> closure(this);
-    iterate_stack(&closure);
-  }
+  TransformStackChunkClosure closure(this);
+  iterate_stack(&closure);
 }

 template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
@@ -391,33 +381,15 @@ template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::St
 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);

-class UncompressOopsOopClosure : public OopClosure {
-public:
-  void do_oop(oop* p) override {
-    assert(UseCompressedOops, "Only needed with compressed oops");
-    oop obj = CompressedOops::decode(*(narrowOop*)p);
-    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
-    *p = obj;
-  }
-
-  void do_oop(narrowOop* p) override {}
-};
-
 template <typename RegisterMapT>
 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
   if (!(is_gc_mode() || requires_barriers())) {
     return;
   }

-  if (has_bitmap() && UseCompressedOops) {
-    UncompressOopsOopClosure oop_closure;
-    if (f.is_interpreted_frame()) {
-      f.oops_interpreted_do(&oop_closure, nullptr);
-    } else {
-      OopMapDo<UncompressOopsOopClosure, DerivedOopClosure, SkipNullValue> visitor(&oop_closure, nullptr);
-      visitor.oops_do(&f, map, f.oop_map());
-    }
-  }
+  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
+  FrameOopIterator<RegisterMapT> iterator(f, map);
+  bs_chunk->decode_gc_mode(this, &iterator);

   if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
     DerivedPointersSupport::DerelativizeClosure derived_closure;
@@ -441,12 +413,6 @@ void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {

 #ifdef ASSERT

-template <typename P>
-static inline oop safe_load(P* addr) {
-  oop obj = RawAccess<>::oop_load(addr);
-  return NativeAccess<>::oop_load(&obj);
-}
-
 class StackChunkVerifyOopsClosure : public OopClosure {
   stackChunkOop _chunk;
   int _count;
@@ -459,8 +425,8 @@ public:
   void do_oop(narrowOop* p) override { do_oop_work(p); }
   template <typename T> inline void do_oop_work(T* p) {
     _count++;
-    oop obj = safe_load(p);
+    oop obj = _chunk->load_oop(p);
     assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
     if (_chunk->has_bitmap()) {
       BitMap::idx_t index = _chunk->bit_index_for(p);
@@ -547,7 +513,7 @@ public:
     T* p = _chunk->address_for_bit<T>(index);
     _count++;
-    oop obj = safe_load(p);
+    oop obj = _chunk->load_oop(p);
     assert(obj == nullptr || dbg_is_good_oop(obj),
            "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: " SIZE_FORMAT,
            p2i(p), p2i((oopDesc*)obj), index);


@@ -68,8 +68,6 @@ public:
   inline stackChunkOop parent() const;
   inline void set_parent(stackChunkOop value);
   template<typename P>
-  inline bool is_parent_null() const;
-  template<typename P>
   inline void set_parent_raw(oop value);
   template<DecoratorSet decorators>
   inline void set_parent_access(oop value);
@@ -94,7 +92,6 @@ public:
   inline void set_max_thawing_size(int value);

   inline oop cont() const;
-  template<typename P> inline oop cont() const;
   inline void set_cont(oop value);
   template<typename P>
   inline void set_cont_raw(oop value);
@@ -154,6 +151,7 @@ public:
   void relativize_derived_pointers_concurrently();
   void transform();

+  inline void* gc_data() const;
   inline BitMapView bitmap() const;
   inline BitMap::idx_t bit_index_for(intptr_t* p) const;
   inline intptr_t* address_for_bit(BitMap::idx_t index) const;
@@ -186,6 +184,9 @@ public:
   inline void copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size);
   inline void copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size);

+  template <typename OopT>
+  inline oop load_oop(OopT* addr);
+
   using oopDesc::print_on;
   void print_on(bool verbose, outputStream* st) const;


@@ -28,8 +28,11 @@
 #include "oops/stackChunkOop.hpp"

 #include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetStackChunk.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceStackChunkKlass.inline.hpp"
 #include "runtime/continuationJavaClasses.inline.hpp"
 #include "runtime/frame.inline.hpp"
@@ -47,8 +50,6 @@ inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
 }

 inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
-template<typename P>
-inline bool stackChunkOopDesc::is_parent_null() const { return jdk_internal_vm_StackChunk::is_parent_null<P>(as_oop()); }
 inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
 template<typename P>
 inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
@@ -85,16 +86,10 @@ inline void stackChunkOopDesc::set_max_thawing_size(int value) {
   jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
 }

-inline oop stackChunkOopDesc::cont() const { return UseCompressedOops ? cont<narrowOop>() : cont<oop>(); /* jdk_internal_vm_StackChunk::cont(as_oop()); */ }
-template<typename P>
-inline oop stackChunkOopDesc::cont() const {
-  oop obj = jdk_internal_vm_StackChunk::cont_raw<P>(as_oop());
-  obj = (oop)NativeAccess<>::oop_load(&obj);
-  return obj;
-}
+inline oop stackChunkOopDesc::cont() const { return jdk_internal_vm_StackChunk::cont(as_oop()); }
 inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
 template<typename P>
 inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
 template<DecoratorSet decorators>
 inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }
@@ -226,11 +221,17 @@ inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure
 inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
 inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

-inline BitMapView stackChunkOopDesc::bitmap() const {
+inline void* stackChunkOopDesc::gc_data() const {
   int stack_sz = stack_size();
+  assert(stack_sz != 0, "stack should not be empty");

-  // The bitmap is located after the stack.
-  HeapWord* bitmap_addr = start_of_stack() + stack_sz;
+  // The gc data is located after the stack.
+  return start_of_stack() + stack_sz;
+}
+
+inline BitMapView stackChunkOopDesc::bitmap() const {
+  HeapWord* bitmap_addr = static_cast<HeapWord*>(gc_data());
+  int stack_sz = stack_size();
   size_t bitmap_size_in_bits = InstanceStackChunkKlass::bitmap_size_in_bits(stack_sz);

   BitMapView bitmap((BitMap::bm_word_t*)bitmap_addr, bitmap_size_in_bits);
@@ -347,6 +348,11 @@ inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t
   memcpy(to, from, size << LogBytesPerWord);
 }

+template <typename OopT>
+inline oop stackChunkOopDesc::load_oop(OopT* addr) {
+  return BarrierSet::barrier_set()->barrier_set_stack_chunk()->load_oop(this, addr);
+}
+
 inline intptr_t* stackChunkOopDesc::relative_base() const {
   // we relativize with respect to end rather than start because GC might compact the chunk
   return end_address() + frame::metadata_words;


@@ -54,6 +54,7 @@
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
 #include "runtime/threads.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vframe.inline.hpp"
@@ -634,6 +635,12 @@ JavaThread* JvmtiEnvBase::get_JavaThread_or_null(oop vthread) {
   }

   JavaThread* java_thread = java_lang_Thread::thread(carrier_thread);
+
+  // This could be a different thread to the current one. So we need to ensure that
+  // processing has started before we are allowed to read the continuation oop of
+  // another thread, as it is a direct root of that other thread.
+  StackWatermarkSet::start_processing(java_thread, StackWatermarkKind::gc);
+
   oop cont = java_lang_VirtualThread::continuation(vthread);
   assert(cont != NULL, "must be");
   assert(Continuation::continuation_scope(cont) == java_lang_VirtualThread::vthread_scope(), "must be");


@@ -134,7 +134,7 @@ public:
   Method* method() override { return _jvf->method(); }
   int bci() override { return _jvf->bci(); }
-  oop cont() override { return continuation() != NULL ? continuation(): ContinuationEntry::cont_oop_or_null(_cont_entry); }
+  oop cont() override { return continuation() != NULL ? continuation(): ContinuationEntry::cont_oop_or_null(_cont_entry, _map->thread()); }

   void fill_frame(int index, objArrayHandle frames_array,
                   const methodHandle& method, TRAPS) override;


@@ -61,23 +61,13 @@ static jlong java_tid(JavaThread* thread) {
 }
 #endif

-const ContinuationEntry* Continuation::last_continuation(const JavaThread* thread, oop cont_scope) {
-  // guarantee (thread->has_last_Java_frame(), "");
-  for (ContinuationEntry* entry = thread->last_continuation(); entry != nullptr; entry = entry->parent()) {
-    if (cont_scope == jdk_internal_vm_Continuation::scope(entry->cont_oop())) {
-      return entry;
-    }
-  }
-  return nullptr;
-}
-
 ContinuationEntry* Continuation::get_continuation_entry_for_continuation(JavaThread* thread, oop continuation) {
   if (thread == nullptr || continuation == nullptr) {
     return nullptr;
   }

   for (ContinuationEntry* entry = thread->last_continuation(); entry != nullptr; entry = entry->parent()) {
-    if (continuation == entry->cont_oop()) {
+    if (continuation == entry->cont_oop(thread)) {
       return entry;
     }
   }
@@ -99,10 +89,6 @@ bool Continuation::is_continuation_mounted(JavaThread* thread, oop continuation)
   return is_on_stack(thread, get_continuation_entry_for_continuation(thread, continuation));
 }

-bool Continuation::is_continuation_scope_mounted(JavaThread* thread, oop cont_scope) {
-  return is_on_stack(thread, last_continuation(thread, cont_scope));
-}
-
 // When walking the virtual stack, this method returns true
 // iff the frame is a thawed continuation frame whose
 // caller is still frozen on the h-stack.
@@ -193,7 +179,7 @@ frame Continuation::top_frame(const frame& callee, RegisterMap* map) {
   assert(map != nullptr, "");
   ContinuationEntry* ce = get_continuation_entry_for_sp(map->thread(), callee.sp());
   assert(ce != nullptr, "");
-  oop continuation = ce->cont_oop();
+  oop continuation = ce->cont_oop(map->thread());
   ContinuationWrapper cont(continuation);
   return continuation_top_frame(cont, map);
 }
@@ -266,7 +252,7 @@ bool Continuation::is_scope_bottom(oop cont_scope, const frame& f, const Registe
     if (ce == nullptr) {
       return false;
     }
-    continuation = ce->cont_oop();
+    continuation = ce->cont_oop(map->thread());
   }
   if (continuation == nullptr) {
     return false;


@@ -72,13 +72,11 @@ public:
   static int prepare_thaw(JavaThread* thread, bool return_barrier);
   static address thaw_entry();

-  static const ContinuationEntry* last_continuation(const JavaThread* thread, oop cont_scope);
   static ContinuationEntry* get_continuation_entry_for_continuation(JavaThread* thread, oop continuation);
   static ContinuationEntry* get_continuation_entry_for_sp(JavaThread* thread, intptr_t* const sp);
   static ContinuationEntry* get_continuation_entry_for_entry_frame(JavaThread* thread, const frame& f);

   static bool is_continuation_mounted(JavaThread* thread, oop continuation);
-  static bool is_continuation_scope_mounted(JavaThread* thread, oop cont_scope);

   static bool is_cont_barrier_frame(const frame& f);
   static bool is_return_barrier_entry(const address pc);


@@ -90,11 +90,6 @@ void ContinuationEntry::flush_stack_processing(JavaThread* thread) const {
   maybe_flush_stack_processing(thread, (intptr_t*)((uintptr_t)entry_sp() + ContinuationEntry::size()));
 }

-void ContinuationEntry::setup_oopmap(OopMap* map) {
-  map->set_oop(VMRegImpl::stack2reg(in_bytes(cont_offset()) / VMRegImpl::stack_slot_size));
-  map->set_oop(VMRegImpl::stack2reg(in_bytes(chunk_offset()) / VMRegImpl::stack_slot_size));
-}
-
 #ifndef PRODUCT
 void ContinuationEntry::describe(FrameValues& values, int frame_no) const {
   address usp = (address)this;


@@ -90,8 +90,6 @@ public:
   static ByteSize parent_cont_fastpath_offset() { return byte_offset_of(ContinuationEntry, _parent_cont_fastpath); }
   static ByteSize parent_held_monitor_count_offset() { return byte_offset_of(ContinuationEntry, _parent_held_monitor_count); }

-  static void setup_oopmap(OopMap* map);
-
 public:
   static size_t size() { return align_up((int)sizeof(ContinuationEntry), 2*wordSize); }
@@ -131,9 +129,12 @@ public:
   void flush_stack_processing(JavaThread* thread) const;

   inline intptr_t* bottom_sender_sp() const;
-  inline oop cont_oop() const;
-  inline oop scope() const;
-  inline static oop cont_oop_or_null(const ContinuationEntry* ce);
+  inline oop cont_oop(const JavaThread* thread) const;
+  inline oop scope(const JavaThread* thread) const;
+  inline static oop cont_oop_or_null(const ContinuationEntry* ce, const JavaThread* thread);
+
+  oop* cont_addr()  { return (oop*)&_cont; }
+  oop* chunk_addr() { return (oop*)&_chunk; }

   bool is_virtual_thread() const { return _flags != 0; }

View file

@ -27,10 +27,12 @@
#include "runtime/continuationEntry.hpp" #include "runtime/continuationEntry.hpp"
#include "oops/access.hpp" #include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/frame.hpp" #include "runtime/frame.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp" #include "utilities/align.hpp"
#include CPU_HEADER_INLINE(continuationEntry) #include CPU_HEADER_INLINE(continuationEntry)
inline intptr_t* ContinuationEntry::bottom_sender_sp() const { inline intptr_t* ContinuationEntry::bottom_sender_sp() const {
@ -43,17 +45,29 @@ inline intptr_t* ContinuationEntry::bottom_sender_sp() const {
return sp; return sp;
} }
inline oop ContinuationEntry::cont_oop() const { inline bool is_stack_watermark_processing_started(const JavaThread* thread) {
oop snapshot = _cont; StackWatermark* sw = StackWatermarkSet::get(const_cast<JavaThread*>(thread), StackWatermarkKind::gc);
return NativeAccess<>::oop_load(&snapshot);
if (sw == nullptr) {
// No stale processing without stack watermarks
return true;
}
return sw->processing_started();
} }
inline oop ContinuationEntry::cont_oop_or_null(const ContinuationEntry* ce) { inline oop ContinuationEntry::cont_oop(const JavaThread* thread) const {
return ce == nullptr ? nullptr : ce->cont_oop(); assert(!Universe::heap()->is_in((void*)&_cont), "Should not be in the heap");
assert(is_stack_watermark_processing_started(thread != nullptr ? thread : JavaThread::current()), "Not processed");
return *(oop*)&_cont;
} }
inline oop ContinuationEntry::scope() const { inline oop ContinuationEntry::cont_oop_or_null(const ContinuationEntry* ce, const JavaThread* thread) {
return Continuation::continuation_scope(cont_oop()); return ce == nullptr ? nullptr : ce->cont_oop(thread);
}
inline oop ContinuationEntry::scope(const JavaThread* thread) const {
return Continuation::continuation_scope(cont_oop(thread));
} }
#endif // SHARE_VM_RUNTIME_CONTINUATIONENTRY_INLINE_HPP #endif // SHARE_VM_RUNTIME_CONTINUATIONENTRY_INLINE_HPP
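The new cont_oop(thread) accessor above trades the NativeAccess load for a plain load that is only legal after the thread's stack watermark processing has run. A minimal standalone sketch of that guard, using stand-in types rather than HotSpot's StackWatermark and ContinuationEntry:

#include <atomic>
#include <cassert>

// Stand-in for StackWatermark: records whether GC root fix-up for a thread has run.
struct WatermarkState {
  std::atomic<bool> processing_started{false};
};

// Stand-in for ContinuationEntry with its _cont slot.
struct EntryState {
  void* cont = nullptr;

  // Mirrors the new cont_oop(thread) contract: the barrier-free load is only
  // legal once the owning thread's roots have been processed.
  void* cont_oop(const WatermarkState& wm) const {
    assert(wm.processing_started.load(std::memory_order_acquire) && "roots not yet processed");
    return cont;  // plain load; the thread's root walk keeps this slot healed
  }
};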

View file

@ -1229,7 +1229,11 @@ NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
// old chunks are all in GC mode. // old chunks are all in GC mode.
assert(!UseG1GC, "G1 can not deal with allocating outside of eden"); assert(!UseG1GC, "G1 can not deal with allocating outside of eden");
assert(!UseZGC, "ZGC can not deal with allocating chunks visible to marking"); assert(!UseZGC, "ZGC can not deal with allocating chunks visible to marking");
ContinuationGCSupport::transform_stack_chunk(_cont.tail()); if (UseShenandoahGC) {
_cont.tail()->relativize_derived_pointers_concurrently();
} else {
ContinuationGCSupport::transform_stack_chunk(_cont.tail());
}
// For objects in the old generation we must maintain the remembered set // For objects in the old generation we must maintain the remembered set
_cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(); _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
} }
@ -1263,6 +1267,84 @@ inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive
return false; return false;
} }
class StackChunkAllocator : public MemAllocator {
const size_t _stack_size;
ContinuationWrapper& _continuation_wrapper;
JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
mutable bool _took_slow_path;
// Does the minimal amount of initialization needed for a TLAB allocation.
// We don't need to do a full initialization, as such an allocation need not be immediately walkable.
virtual oop initialize(HeapWord* mem) const override {
assert(_stack_size > 0, "");
assert(_stack_size <= max_jint, "");
assert(_word_size > _stack_size, "");
// zero out fields (but not the stack)
const size_t hs = oopDesc::header_size();
Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
jdk_internal_vm_StackChunk::set_sp(mem, (int)_stack_size);
return finish(mem);
}
stackChunkOop allocate_fast() const {
if (!UseTLAB) {
return nullptr;
}
HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
if (mem == nullptr) {
return nullptr;
}
oop obj = initialize(mem);
return stackChunkOopDesc::cast(obj);
}
public:
StackChunkAllocator(Klass* klass,
size_t word_size,
Thread* thread,
size_t stack_size,
ContinuationWrapper& continuation_wrapper,
JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
: MemAllocator(klass, word_size, thread),
_stack_size(stack_size),
_continuation_wrapper(continuation_wrapper),
_jvmti_event_collector(jvmti_event_collector),
_took_slow_path(false) {}
// Provides its own, specialized allocation which skips instrumentation
// if the memory can be allocated without going to a slow-path.
stackChunkOop allocate() const {
// First try to allocate without any slow-paths or instrumentation.
stackChunkOop obj = allocate_fast();
if (obj != nullptr) {
return obj;
}
// Now try full-blown allocation with all expensive operations,
// including potentially safepoint operations.
_took_slow_path = true;
// Protect unhandled Loom oops
ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
// Can safepoint
_jvmti_event_collector->start();
// Can safepoint
return stackChunkOopDesc::cast(MemAllocator::allocate());
}
bool took_slow_path() const {
return _took_slow_path;
}
};
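The allocator above first tries a bare TLAB allocation and only falls back to the instrumented, safepoint-capable path when that fails, recording which path ran. A simplified standalone sketch of the same two-step shape, with plain C++ stand-ins rather than the MemAllocator API:

#include <cstddef>
#include <cstdlib>

// Stand-in for a TLAB: bump-pointer allocation that can fail.
class BumpArena {
  char* _top;
  char* _end;
public:
  BumpArena(char* base, size_t bytes) : _top(base), _end(base + bytes) {}
  void* try_alloc(size_t bytes) {
    if (_top + bytes > _end) return nullptr;   // fast path may fail
    void* p = _top;
    _top += bytes;
    return p;
  }
};

class ChunkAllocator {
  BumpArena& _arena;
  const size_t _size;
  mutable bool _took_slow_path = false;
public:
  ChunkAllocator(BumpArena& arena, size_t size) : _arena(arena), _size(size) {}

  void* allocate() const {
    // First try without any instrumentation or safepoint-prone work.
    if (void* p = _arena.try_alloc(_size)) {
      return p;
    }
    // Only now enable the expensive machinery, and remember that we did.
    _took_slow_path = true;
    return std::malloc(_size);                 // stands in for the slow-path allocation
  }
  bool took_slow_path() const { return _took_slow_path; }
};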
template <typename ConfigT> template <typename ConfigT>
stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) { stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
log_develop_trace(continuations)("allocate_chunk allocating new chunk"); log_develop_trace(continuations)("allocate_chunk allocating new chunk");
@ -1280,20 +1362,19 @@ stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
JavaThread* current = _preempt ? JavaThread::current() : _thread; JavaThread* current = _preempt ? JavaThread::current() : _thread;
assert(current == JavaThread::current(), "should be current"); assert(current == JavaThread::current(), "should be current");
StackChunkAllocator allocator(klass, size_in_words, stack_size, current); // Allocate the chunk.
oop fast_oop = allocator.try_allocate_in_existing_tlab(); //
oop chunk_oop = fast_oop; // This might safepoint while allocating, but all safepointing due to
if (chunk_oop == nullptr) { // instrumentation have been deferred. This property is important for
ContinuationWrapper::SafepointOp so(current, _cont); // some GCs, as this ensures that the allocated object is in the young
assert(_jvmti_event_collector != nullptr, ""); // generation / newly allocated memory.
_jvmti_event_collector->start(); // can safepoint StackChunkAllocator allocator(klass, size_in_words, current, stack_size, _cont, _jvmti_event_collector);
chunk_oop = allocator.allocate(); // can safepoint stackChunkOop chunk = allocator.allocate();
if (chunk_oop == nullptr) {
return nullptr; // OOME if (chunk == nullptr) {
} return nullptr; // OOME
} }
stackChunkOop chunk = stackChunkOopDesc::cast(chunk_oop);
// assert that chunk is properly initialized // assert that chunk is properly initialized
assert(chunk->stack_size() == (int)stack_size, ""); assert(chunk->stack_size() == (int)stack_size, "");
assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size); assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
@ -1309,23 +1390,36 @@ stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk()); chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation()); chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), ""); #if INCLUDE_ZGC
if (UseZGC) {
assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
_barriers = false;
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (UseShenandoahGC) {
// Shenandoah: even continuation is good, it does not mean it is deeply good. _barriers = chunk->requires_barriers();
if (UseShenandoahGC && chunk->requires_barriers()) { } else
fast_oop = nullptr; #endif
} {
if (!allocator.took_slow_path()) {
if (fast_oop != nullptr) { // Guaranteed to be in young gen / newly allocated memory
assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation"); assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
} else { _barriers = false;
assert(!UseZGC || !chunk->requires_barriers(), "Allocated ZGC object requires barriers"); } else {
_barriers = !UseZGC && chunk->requires_barriers(); // Some GCs could put direct allocations in old gen for slow-path
// allocations; need to explicitly check if that was the case.
if (_barriers) { _barriers = chunk->requires_barriers();
log_develop_trace(continuations)("allocation requires barriers");
} }
} }
if (_barriers) {
log_develop_trace(continuations)("allocation requires barriers");
}
assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
return chunk; return chunk;
} }
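The barrier decision in the new allocate_chunk can be read as a small pure function of the GC in use and of whether the allocation took the slow path. A hedged sketch of that decision, with illustrative parameter names rather than HotSpot globals:

// Inputs mirror the conditions tested above; the function itself is illustrative.
bool chunk_needs_barriers(bool use_zgc, bool use_shenandoah,
                          bool took_slow_path, bool chunk_requires_barriers) {
  if (use_zgc)         return false;                    // ZGC always allocates young
  if (use_shenandoah)  return chunk_requires_barriers;  // ask the chunk directly
  if (!took_slow_path) return false;                    // TLAB => newly allocated memory
  return chunk_requires_barriers;                       // slow path may land in old gen
}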
@ -1431,15 +1525,15 @@ static inline int freeze_internal(JavaThread* current, intptr_t* const sp) {
ContinuationEntry* entry = current->last_continuation(); ContinuationEntry* entry = current->last_continuation();
oop oopCont = entry->cont_oop(); oop oopCont = entry->cont_oop(current);
assert(oopCont == current->last_continuation()->cont_oop(), ""); assert(oopCont == current->last_continuation()->cont_oop(current), "");
assert(ContinuationEntry::assert_entry_frame_laid_out(current), ""); assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
verify_continuation(oopCont); verify_continuation(oopCont);
ContinuationWrapper cont(current, oopCont); ContinuationWrapper cont(current, oopCont);
log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont)); log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
assert(entry->is_virtual_thread() == (entry->scope() == java_lang_VirtualThread::vthread_scope()), ""); assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
assert(monitors_on_stack(current) == ((current->held_monitor_count() - current->jni_monitor_count()) > 0), assert(monitors_on_stack(current) == ((current->held_monitor_count() - current->jni_monitor_count()) > 0),
"Held monitor count and locks on stack invariant: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count()); "Held monitor count and locks on stack invariant: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
@ -1523,7 +1617,7 @@ static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoi
f = f.sender(&map); f = f.sender(&map);
if (!Continuation::is_frame_in_continuation(entry, f)) { if (!Continuation::is_frame_in_continuation(entry, f)) {
oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop()); oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
if (scope == cont_scope) { if (scope == cont_scope) {
break; break;
} }
@ -1560,7 +1654,7 @@ static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier)
ContinuationEntry* ce = thread->last_continuation(); ContinuationEntry* ce = thread->last_continuation();
assert(ce != nullptr, ""); assert(ce != nullptr, "");
oop continuation = ce->cont_oop(); oop continuation = ce->cont_oop(thread);
assert(continuation == get_continuation(thread), ""); assert(continuation == get_continuation(thread), "");
verify_continuation(continuation); verify_continuation(continuation);
@ -1813,7 +1907,7 @@ NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
} }
// Are we thawing the last frame(s) in the continuation // Are we thawing the last frame(s) in the continuation
const bool is_last = empty && chunk->is_parent_null<typename ConfigT::OopT>(); const bool is_last = empty && chunk->parent() == NULL;
assert(!is_last || argsize == 0, ""); assert(!is_last || argsize == 0, "");
log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT, log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
@ -1897,7 +1991,6 @@ NOINLINE intptr_t* ThawBase::thaw_slow(stackChunkOop chunk, bool return_barrier)
#if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
if (UseZGC || UseShenandoahGC) { if (UseZGC || UseShenandoahGC) {
// TODO ZGC: this is where we'd want to restore color to the oops
_cont.tail()->relativize_derived_pointers_concurrently(); _cont.tail()->relativize_derived_pointers_concurrently();
} }
#endif #endif
@ -2283,13 +2376,13 @@ static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::th
ContinuationEntry* entry = thread->last_continuation(); ContinuationEntry* entry = thread->last_continuation();
assert(entry != nullptr, ""); assert(entry != nullptr, "");
oop oopCont = entry->cont_oop(); oop oopCont = entry->cont_oop(thread);
assert(!jdk_internal_vm_Continuation::done(oopCont), ""); assert(!jdk_internal_vm_Continuation::done(oopCont), "");
assert(oopCont == get_continuation(thread), ""); assert(oopCont == get_continuation(thread), "");
verify_continuation(oopCont); verify_continuation(oopCont);
assert(entry->is_virtual_thread() == (entry->scope() == java_lang_VirtualThread::vthread_scope()), ""); assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
ContinuationWrapper cont(thread, oopCont); ContinuationWrapper cont(thread, oopCont);
log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont)); log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));

View file

@ -106,8 +106,6 @@ class jdk_internal_vm_StackChunk: AllStatic {
static inline oop parent(oop chunk); static inline oop parent(oop chunk);
static inline void set_parent(oop chunk, oop value); static inline void set_parent(oop chunk, oop value);
template<typename P> template<typename P>
static inline bool is_parent_null(oop chunk); // bypasses barriers for a faster test
template<typename P>
static inline void set_parent_raw(oop chunk, oop value); static inline void set_parent_raw(oop chunk, oop value);
template<DecoratorSet decorators> template<DecoratorSet decorators>
static inline void set_parent_access(oop chunk, oop value); static inline void set_parent_access(oop chunk, oop value);
@ -131,15 +129,13 @@ class jdk_internal_vm_StackChunk: AllStatic {
static inline int maxThawingSize(oop chunk); static inline int maxThawingSize(oop chunk);
static inline void set_maxThawingSize(oop chunk, int value); static inline void set_maxThawingSize(oop chunk, int value);
// cont oop's processing is essential for the chunk's GC protocol // cont oop's processing is essential for the chunk's GC protocol
static inline oop cont(oop chunk); static inline oop cont(oop chunk);
static inline void set_cont(oop chunk, oop value); static inline void set_cont(oop chunk, oop value);
template<typename P> template<typename P>
static inline oop cont_raw(oop chunk); static inline void set_cont_raw(oop chunk, oop value);
template<typename P> template<DecoratorSet decorators>
static inline void set_cont_raw(oop chunk, oop value); static inline void set_cont_access(oop chunk, oop value);
template<DecoratorSet decorators>
static inline void set_cont_access(oop chunk, oop value);
}; };
#endif // SHARE_RUNTIME_CONTINUATIONJAVACLASSES_HPP #endif // SHARE_RUNTIME_CONTINUATIONJAVACLASSES_HPP

View file

@ -87,11 +87,6 @@ inline void jdk_internal_vm_StackChunk::set_parent(oop chunk, oop value) {
chunk->obj_field_put(_parent_offset, value); chunk->obj_field_put(_parent_offset, value);
} }
template<typename P>
inline bool jdk_internal_vm_StackChunk::is_parent_null(oop chunk) {
return (oop)RawAccess<>::oop_load(chunk->field_addr<P>(_parent_offset)) == NULL;
}
template<typename P> template<typename P>
inline void jdk_internal_vm_StackChunk::set_parent_raw(oop chunk, oop value) { inline void jdk_internal_vm_StackChunk::set_parent_raw(oop chunk, oop value) {
RawAccess<>::oop_store(chunk->field_addr<P>(_parent_offset), value); RawAccess<>::oop_store(chunk->field_addr<P>(_parent_offset), value);
@ -110,11 +105,6 @@ inline void jdk_internal_vm_StackChunk::set_cont(oop chunk, oop value) {
chunk->obj_field_put(_cont_offset, value); chunk->obj_field_put(_cont_offset, value);
} }
template<typename P>
inline oop jdk_internal_vm_StackChunk::cont_raw(oop chunk) {
return (oop)RawAccess<>::oop_load(chunk->field_addr<P>(_cont_offset));
}
template<typename P> template<typename P>
inline void jdk_internal_vm_StackChunk::set_cont_raw(oop chunk, oop value) { inline void jdk_internal_vm_StackChunk::set_cont_raw(oop chunk, oop value) {
RawAccess<>::oop_store(chunk->field_addr<P>(_cont_offset), value); RawAccess<>::oop_store(chunk->field_addr<P>(_cont_offset), value);

View file

@ -43,9 +43,9 @@ ContinuationWrapper::ContinuationWrapper(const RegisterMap* map)
_continuation(map->stack_chunk()->cont()) _continuation(map->stack_chunk()->cont())
{ {
assert(oopDesc::is_oop(_continuation),"Invalid cont: " INTPTR_FORMAT, p2i((void*)_continuation)); assert(oopDesc::is_oop(_continuation),"Invalid cont: " INTPTR_FORMAT, p2i((void*)_continuation));
assert(_entry == nullptr || _continuation == _entry->cont_oop(), assert(_entry == nullptr || _continuation == _entry->cont_oop(map->thread()),
"cont: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: " INTPTR_FORMAT, "cont: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: " INTPTR_FORMAT,
p2i( (oopDesc*)_continuation), p2i((oopDesc*)_entry->cont_oop()), p2i(entrySP())); p2i( (oopDesc*)_continuation), p2i((oopDesc*)_entry->cont_oop(map->thread())), p2i(entrySP()));
disallow_safepoint(); disallow_safepoint();
read(); read();
} }

View file

@ -79,7 +79,7 @@ public:
void done() { void done() {
allow_safepoint(); // must be done first allow_safepoint(); // must be done first
_continuation = nullptr; _continuation = nullptr;
_tail = (stackChunkOop)badOop; *reinterpret_cast<intptr_t*>(&_tail) = badHeapOopVal;
} }
class SafepointOp : public StackObj { class SafepointOp : public StackObj {
@ -149,8 +149,6 @@ inline ContinuationWrapper::ContinuationWrapper(JavaThread* thread, oop continua
{ {
assert(oopDesc::is_oop(_continuation), assert(oopDesc::is_oop(_continuation),
"Invalid continuation object: " INTPTR_FORMAT, p2i((void*)_continuation)); "Invalid continuation object: " INTPTR_FORMAT, p2i((void*)_continuation));
assert(_continuation == _entry->cont_oop(), "cont: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: "
INTPTR_FORMAT, p2i((oopDesc*)_continuation), p2i((oopDesc*)_entry->cont_oop()), p2i(entrySP()));
disallow_safepoint(); disallow_safepoint();
read(); read();
} }

View file

@ -1382,6 +1382,17 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
if (jvmti_thread_state() != NULL) { if (jvmti_thread_state() != NULL) {
jvmti_thread_state()->oops_do(f, cf); jvmti_thread_state()->oops_do(f, cf);
} }
// The continuation oops are really on the stack. But there is typically at most
// one of those per thread, so we handle them here in the oops_do_no_frames part
// so that we don't have to sprinkle as many stack watermark checks where these
// oops are used. We just need to make sure the thread has started processing.
ContinuationEntry* entry = _cont_entry;
while (entry != nullptr) {
f->do_oop((oop*)entry->cont_addr());
f->do_oop((oop*)entry->chunk_addr());
entry = entry->parent();
}
} }
void JavaThread::oops_do_frames(OopClosure* f, CodeBlobClosure* cf) { void JavaThread::oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {
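With the continuation oops now published from oops_do_no_frames, each ContinuationEntry in the parent chain contributes exactly two roots per thread. A standalone sketch of that traversal, with stand-in types for OopClosure and ContinuationEntry:

// Stand-ins for OopClosure and ContinuationEntry; only the traversal shape matters.
struct OopVisitor {
  virtual void do_oop(void** p) = 0;
  virtual ~OopVisitor() = default;
};

struct Entry {
  void* cont  = nullptr;   // _cont slot
  void* chunk = nullptr;   // _chunk slot
  Entry* parent = nullptr;

  void** cont_addr()  { return &cont; }
  void** chunk_addr() { return &chunk; }
};

// One pass per thread: visit both oop slots of every entry in the parent chain,
// so later raw reads (cont_oop / chunk) see already-processed values.
void visit_continuation_roots(Entry* entry, OopVisitor* f) {
  for (; entry != nullptr; entry = entry->parent) {
    f->do_oop(entry->cont_addr());
    f->do_oop(entry->chunk_addr());
  }
}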

View file

@ -41,18 +41,6 @@
class RegisterMap; class RegisterMap;
class SmallRegisterMap; class SmallRegisterMap;
template <typename OopT>
static oop read_oop_local(OopT* p) {
// We can't do a native access directly from p because load barriers
// may self-heal. If that happens on a base pointer for compressed oops,
// then there will be a crash later on. Only the stack watermark API is
// allowed to heal oops, because it heals derived pointers before their
// corresponding base pointers.
oop obj = RawAccess<>::oop_load(p);
return NativeAccess<>::oop_load(&obj);
}
template StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv); template StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
template StackValue* StackValue::create_stack_value(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv); template StackValue* StackValue::create_stack_value(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv);
@ -61,11 +49,84 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMapT*
return create_stack_value(sv, stack_value_address(fr, reg_map, sv), reg_map); return create_stack_value(sv, stack_value_address(fr, reg_map, sv), reg_map);
} }
template StackValue* StackValue::create_stack_value(ScopeValue*, address, const RegisterMap*); static oop oop_from_oop_location(stackChunkOop chunk, void* addr) {
template StackValue* StackValue::create_stack_value(ScopeValue*, address, const SmallRegisterMap*); if (addr == nullptr) {
return nullptr;
}
if (UseCompressedOops) {
// When compressed oops is enabled, an oop location may
// contain narrow oop values - we deal with that here
if (chunk != NULL && chunk->has_bitmap()) {
// Transformed stack chunk with narrow oops
return chunk->load_oop((narrowOop*)addr);
}
#ifdef _LP64
if (CompressedOops::is_base(*(void**)addr)) {
// Compiled code may produce decoded oop = narrow_oop_base
// when a narrow oop implicit null check is used.
// The narrow_oop_base could be NULL or be the address
// of the page below heap. Use NULL value for both cases.
return nullptr;
}
#endif
}
if (chunk != NULL) {
// Load oop from chunk
return chunk->load_oop((oop*)addr);
}
// Load oop from stack
return *(oop*)addr;
}
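The is_base check above exists because compiled code can materialize the narrow-oop base as a decoded null. A small illustrative sketch of that sanitization, with an assumed base constant standing in for the per-VM CompressedOops::base():

#include <cstdint>

// Assumed constant for illustration; the real base is chosen by the VM at startup.
static const uintptr_t kAssumedNarrowOopBase = 0x0000000800000000ULL;

// A raw stack slot holding a "decoded null" (the narrow-oop base) must be
// reported as null, like the CompressedOops::is_base() branch above.
void* sanitize_decoded_oop(void* raw) {
  if (reinterpret_cast<uintptr_t>(raw) == kAssumedNarrowOopBase) {
    return nullptr;
  }
  return raw;
}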
static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
assert(UseCompressedOops, "Narrow oops should not exist");
assert(addr != nullptr, "Not expecting null address");
narrowOop* narrow_addr;
if (is_register) {
// The callee has no clue whether the register holds an int,
// long or is unused. He always saves a long. Here we know
// a long was saved, but we only want an int back. Narrow the
// saved long to the int that the JVM wants. We can't just
// use narrow_oop_cast directly, because we don't know what
// the high bits of the value might be.
narrow_addr = ((narrowOop*)addr) BIG_ENDIAN_ONLY(+ 1);
} else {
narrow_addr = (narrowOop*)addr;
}
if (chunk != NULL) {
// Load oop from chunk
return chunk->load_oop(narrow_addr);
}
// Load oop from stack
return CompressedOops::decode(*narrow_addr);
}
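The BIG_ENDIAN_ONLY(+ 1) adjustment above selects the 32-bit half of the spilled register word that actually holds the narrow oop. A tiny standalone sketch of the same index choice, with assumed names:

#include <cstdint>

// saved_word points at the full word a callee spilled; only its low 32 bits
// hold the narrow oop, so on big-endian the narrow slot is the second half.
uint32_t narrow_slot_from_saved_word(const uint32_t* saved_word, bool big_endian) {
  return big_endian ? saved_word[1] : saved_word[0];   // mirrors BIG_ENDIAN_ONLY(+ 1)
}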
StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) {
oop val = oop_from_oop_location(chunk, addr);
assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
StackValue* StackValue::create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
oop val = oop_from_narrowOop_location(chunk, addr, is_register);
assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
template<typename RegisterMapT> template<typename RegisterMapT>
StackValue* StackValue::create_stack_value(ScopeValue* sv, address value_addr, const RegisterMapT* reg_map) { StackValue* StackValue::create_stack_value(ScopeValue* sv, address value_addr, const RegisterMapT* reg_map) {
stackChunkOop chunk = reg_map->stack_chunk()();
if (sv->is_location()) { if (sv->is_location()) {
// Stack or register value // Stack or register value
Location loc = ((LocationValue *)sv)->location(); Location loc = ((LocationValue *)sv)->location();
@ -111,51 +172,11 @@ StackValue* StackValue::create_stack_value(ScopeValue* sv, address value_addr, c
case Location::lng: case Location::lng:
// Long value in an aligned adjacent pair // Long value in an aligned adjacent pair
return new StackValue(*(intptr_t*)value_addr); return new StackValue(*(intptr_t*)value_addr);
case Location::narrowoop: { case Location::narrowoop:
assert(UseCompressedOops, ""); return create_stack_value_from_narrowOop_location(reg_map->stack_chunk()(), (void*)value_addr, loc.is_register());
union { intptr_t p; narrowOop noop;} value;
value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
if (loc.is_register()) {
// The callee has no clue whether the register holds an int,
// long or is unused. He always saves a long. Here we know
// a long was saved, but we only want an int back. Narrow the
// saved long to the int that the JVM wants. We can't just
// use narrow_oop_cast directly, because we don't know what
// the high bits of the value might be.
static_assert(sizeof(narrowOop) == sizeof(juint), "size mismatch");
juint narrow_value = (juint) *(julong*)value_addr;
value.noop = CompressedOops::narrow_oop_cast(narrow_value);
} else {
value.noop = *(narrowOop*) value_addr;
}
// Decode narrowoop
oop val = read_oop_local(&value.noop);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
#endif #endif
case Location::oop: { case Location::oop:
oop val; return create_stack_value_from_oop_location(reg_map->stack_chunk()(), (void*)value_addr);
if (reg_map->in_cont() && reg_map->stack_chunk()->has_bitmap() && UseCompressedOops) {
val = CompressedOops::decode(*(narrowOop*)value_addr);
} else {
val = *(oop *)value_addr;
}
#ifdef _LP64
if (CompressedOops::is_base(val)) {
// Compiled code may produce decoded oop = narrow_oop_base
// when a narrow oop implicit null check is used.
// The narrow_oop_base could be NULL or be the address
// of the page below heap. Use NULL value for both cases.
val = (oop)NULL;
}
#endif
val = read_oop_local(&val);
assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
p2i(value_addr), reg_map->in_cont(), reg_map->in_cont() && reg_map->stack_chunk()->has_bitmap() && UseCompressedOops);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
case Location::addr: { case Location::addr: {
loc.print_on(tty); loc.print_on(tty);
ShouldNotReachHere(); // both C1 and C2 now inline jsrs ShouldNotReachHere(); // both C1 and C2 now inline jsrs

View file

@ -108,6 +108,9 @@ class StackValue : public ResourceObj {
} }
} }
static StackValue* create_stack_value_from_oop_location(stackChunkOop chunk, void* addr);
static StackValue* create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register);
static BasicLock* resolve_monitor_lock(const frame* fr, Location location); static BasicLock* resolve_monitor_lock(const frame* fr, Location location);
template<typename RegisterMapT> template<typename RegisterMapT>

View file

@ -332,22 +332,11 @@ static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_
const intptr_t* const addr, const intptr_t* const addr,
stackChunkOop chunk) { stackChunkOop chunk) {
assert(index >= 0 && assert(index >= 0 && index < oop_mask.number_of_entries(), "invariant");
index < oop_mask.number_of_entries(), "invariant");
// categorize using oop_mask // categorize using oop_mask
if (oop_mask.is_oop(index)) { if (oop_mask.is_oop(index)) {
oop obj = NULL; return StackValue::create_stack_value_from_oop_location(chunk, (void*)addr);
if (addr != NULL) {
if (chunk != NULL) {
obj = (chunk->has_bitmap() && UseCompressedOops) ? (oop)HeapAccess<>::oop_load((narrowOop*)addr) : HeapAccess<>::oop_load((oop*)addr);
} else {
obj = *(oop*)addr;
}
}
// reference (oop) "r"
Handle h(Thread::current(), obj);
return new StackValue(h);
} }
// value (integer) "v" // value (integer) "v"
return new StackValue(addr != NULL ? *addr : 0); return new StackValue(addr != NULL ? *addr : 0);

View file

@ -42,7 +42,7 @@ inline oop vframeStreamCommon::continuation() const {
if (_reg_map.cont() != NULL) { if (_reg_map.cont() != NULL) {
return _reg_map.cont(); return _reg_map.cont();
} else if (_cont_entry != NULL) { } else if (_cont_entry != NULL) {
return _cont_entry->cont_oop(); return _cont_entry->cont_oop(_reg_map.thread());
} else { } else {
return NULL; return NULL;
} }
@ -82,12 +82,13 @@ inline void vframeStreamCommon::next() {
if (Continuation::is_continuation_enterSpecial(_frame)) { if (Continuation::is_continuation_enterSpecial(_frame)) {
assert(!_reg_map.in_cont(), ""); assert(!_reg_map.in_cont(), "");
assert(_cont_entry != NULL, ""); assert(_cont_entry != NULL, "");
assert(_cont_entry->cont_oop() != NULL, "_cont: " INTPTR_FORMAT, p2i(_cont_entry)); // Reading oops is only safe if process_frames() is true, and we fix the oops.
assert(!_reg_map.process_frames() || _cont_entry->cont_oop(_reg_map.thread()) != NULL, "_cont: " INTPTR_FORMAT, p2i(_cont_entry));
is_enterSpecial_frame = true; is_enterSpecial_frame = true;
// TODO: handle ShowCarrierFrames // TODO: handle ShowCarrierFrames
if (_cont_entry->is_virtual_thread() || if (_cont_entry->is_virtual_thread() ||
(_continuation_scope.not_null() && _cont_entry->scope() == _continuation_scope())) { (_continuation_scope.not_null() && _cont_entry->scope(_reg_map.thread()) == _continuation_scope())) {
_mode = at_end_mode; _mode = at_end_mode;
break; break;
} }