8129417: Oop iteration clean-up to remove oop_ms_follow_contents
Reviewed-by: pliden, ehelin
Commit dada9bc32c (parent 72c1fe89b2)

39 changed files with 362 additions and 454 deletions

@@ -66,7 +66,8 @@ class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
   virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
+  void do_cld_nv(ClassLoaderData* cld);
 };
 
 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {

@@ -50,11 +50,11 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
 
 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
-inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.

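Note on the pattern above: every metadata-aware closure keeps a virtual entry point (do_klass, do_cld) next to a non-virtual _nv twin, and the two must stay in sync because specialized iterators call the _nv form directly. A minimal standalone C++ sketch of the idea — all names are invented, this is not HotSpot code:

    // Sketch only: shows why the virtual method forwards to its _nv twin.
    #include <cstdio>

    struct CountClosure {
      int count = 0;
      void do_oop_nv(int* p) { count += *p; }            // non-virtual twin
      virtual void do_oop(int* p) { do_oop_nv(p); }      // must forward to it
    };

    // nv=true binds the closure type statically (inlinable);
    // nv=false goes through the virtual table.
    template <bool nv, class ClosureType>
    void iterate(int* begin, int* end, ClosureType* cl) {
      for (int* p = begin; p != end; ++p) {
        if (nv) cl->do_oop_nv(p);
        else    cl->do_oop(p);
      }
    }

    int main() {
      int data[4] = {1, 2, 3, 4};
      CountClosure cl;
      iterate<true>(data, data + 4, &cl);   // devirtualized path
      iterate<false>(data, data + 4, &cl);  // virtual path
      std::printf("%d\n", cl.count);        // prints 20
    }
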
@@ -702,7 +702,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
          !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(  \
                     oop(bottom)) &&                                         \
          !_collector->CMSCollector::is_dead_obj(oop(bottom))) {             \
-        size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                  \
+        size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);             \
         bottom += _cfls->adjustObjectSize(word_sz);                         \
       } else {                                                              \
         bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);      \
@@ -729,7 +729,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
          !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(  \
                     oop(bottom)) &&                                         \
          !_collector->CMSCollector::is_dead_obj(oop(bottom))) {             \
-        size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                  \
+        size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);             \
         bottom += _cfls->adjustObjectSize(word_sz);                         \
       } else {                                                              \
         bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \

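The repeated substitution in these hunks reflects an API split: the variant of oop_iterate that returns the object's size (so heap walkers can advance to the next object) is now spelled oop_iterate_size, freeing plain oop_iterate to return void. A rough standalone sketch with made-up types:

    // Sketch only (invented types, not HotSpot code).
    #include <cstddef>
    #include <cstdio>

    struct Closure { void do_field(int) {} };

    struct Obj {
      int fields[3];
      size_t word_size() const { return sizeof(*this) / sizeof(int); }

      void oop_iterate(Closure* cl) {          // applies the closure only
        for (int f : fields) cl->do_field(f);
      }
      size_t oop_iterate_size(Closure* cl) {   // applies it and reports size
        oop_iterate(cl);
        return word_size();
      }
    };

    int main() {
      Obj obj{};
      Closure cl;
      // Heap walkers advance by the returned size, so they use the _size form:
      size_t advance = obj.oop_iterate_size(&cl);
      std::printf("advance %zu words\n", advance);
    }
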
@@ -4623,7 +4623,7 @@ void CMSParRemarkTask::work(uint worker_id) {
       ResourceMark rm;
       GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
       for (int i = 0; i < array->length(); i++) {
-        par_mrias_cl.do_class_loader_data(array->at(i));
+        par_mrias_cl.do_cld_nv(array->at(i));
       }
 
       // We don't need to keep track of new CLDs anymore.
@@ -5199,7 +5199,7 @@ void CMSCollector::do_remark_non_parallel() {
       ResourceMark rm;
       GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
       for (int i = 0; i < array->length(); i++) {
-        mrias_cl.do_class_loader_data(array->at(i));
+        mrias_cl.do_cld_nv(array->at(i));
       }
 
       // We don't need to keep track of new CLDs anymore.
@@ -6324,12 +6324,12 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
       // objArrays are precisely marked; restrict scanning
       // to dirty cards only.
       size = CompactibleFreeListSpace::adjustObjectSize(
-               p->oop_iterate(_scanningClosure, mr));
+               p->oop_iterate_size(_scanningClosure, mr));
     } else {
       // A non-array may have been imprecisely marked; we need
       // to scan object in its entirety.
       size = CompactibleFreeListSpace::adjustObjectSize(
-               p->oop_iterate(_scanningClosure));
+               p->oop_iterate_size(_scanningClosure));
     }
     #ifdef ASSERT
       size_t direct_size =
@@ -6417,7 +6417,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
   // Note that we do not yield while we iterate over
   // the interior oops of p, pushing the relevant ones
   // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
+  size_t size = p->oop_iterate_size(_scanning_closure);
   do_yield_check();
   // Observe that below, we do not abandon the preclean
   // phase as soon as we should; rather we empty the

@@ -1143,7 +1143,7 @@ void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
-    int size = obj->oop_iterate(&cl);
+    int size = obj->oop_iterate_size(&cl);
     assert(size == obj->size(), "sanity");
     curr += size;
   }

@@ -74,7 +74,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   assert(rp != NULL, "should be non-NULL");
   assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
 
-  GenMarkSweep::_ref_processor = rp;
+  GenMarkSweep::set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   // When collecting the permanent generation Method*s may be moving,
@@ -108,7 +108,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
-  GenMarkSweep::_ref_processor = NULL;
+  GenMarkSweep::set_ref_processor(NULL);
 }
 
 

@@ -68,7 +68,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
     // or it was allocated after marking finished, then we add it. Otherwise
     // we can safely ignore the object.
     if (!g1h->is_obj_dead(oop(cur), _hr)) {
-      oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
+      oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
     } else {
      oop_size = _hr->block_size(cur);
     }

@@ -44,7 +44,7 @@ void ImmutableSpace::oop_iterate(ExtendedOopClosure* cl) {
   HeapWord* t = end();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
+    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
   }
 }
 

@@ -213,15 +213,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
 }
 
-void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
-  HeapWord* obj_addr = bottom();
-  HeapWord* t = top();
-  // Could call objects iterate, but this is easier.
-  while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
-  }
-}
-
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();

@@ -134,7 +134,6 @@ class MutableSpace: public ImmutableSpace {
   bool cas_deallocate(HeapWord *obj, size_t size);
 
   // Iteration.
-  void oop_iterate(ExtendedOopClosure* cl);
   void oop_iterate_no_header(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
 

@@ -60,7 +60,7 @@ CollectorCounters* PSMarkSweep::_counters = NULL;
 
 void PSMarkSweep::initialize() {
   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
-  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
+  set_ref_processor(new ReferenceProcessor(mr));   // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 

@@ -67,7 +67,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
-  _ref_processor = rp;
+  set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
@@ -136,7 +136,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
   }
 
   // refs processing: clean slate
-  _ref_processor = NULL;
+  set_ref_processor(NULL);
 
   // Update heap occupancy information which is used as
   // input to soft ref clearing policy at the next gc.

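The switch from assigning _ref_processor directly to calling set_ref_processor is needed because, after this change, the statically allocated mark-and-push closure caches the ReferenceProcessor pointer and must be updated together with the field. A toy model (invented names, not HotSpot code):

    // Sketch only: a setter keeps the field and the closure's cached copy in sync.
    #include <cassert>

    struct ReferenceProcessor {};

    struct MarkingClosure {
      ReferenceProcessor* _rp = nullptr;
      void set_ref_processor(ReferenceProcessor* rp) { _rp = rp; }
    };

    struct Collector {
      static ReferenceProcessor* _ref_processor;
      static MarkingClosure mark_and_push_closure;

      static void set_ref_processor(ReferenceProcessor* rp) {
        _ref_processor = rp;                          // update the field...
        mark_and_push_closure.set_ref_processor(rp);  // ...and the cached copy
      }
    };

    ReferenceProcessor* Collector::_ref_processor = nullptr;
    MarkingClosure Collector::mark_and_push_closure;

    int main() {
      ReferenceProcessor rp;
      Collector::set_ref_processor(&rp);
      assert(Collector::mark_and_push_closure._rp == &rp);
    }
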
@@ -28,11 +28,20 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "gc/shared/specialized_oop_closures.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1StringDedup.hpp"
+#endif // INCLUDE_ALL_GCS
 
 uint MarkSweep::_total_invocations = 0;
@@ -50,176 +59,101 @@ SerialOldTracer* MarkSweep::_gc_tracer = NULL;
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
-void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
-void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
-
-MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
+MarkAndPushClosure            MarkSweep::mark_and_push_closure;
 CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
 CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
-template <typename T>
-void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p)       { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p)        { do_oop_nv(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p)  { do_oop_nv(p); }
-
-void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
-  MarkSweep::follow_cld_closure.do_cld(cld);
-}
-
-void InstanceKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj != NULL, "can't follow the content of NULL object");
-  MarkSweep::follow_klass(this);
-
-  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
-}
-
-void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  // Follow the klass field in the mirror
-  Klass* klass = java_lang_Class::as_Klass(obj);
-  if (klass != NULL) {
-    // An anonymous class doesn't have its own class loader, so the call
-    // to follow_klass will mark and push its java mirror instead of the
-    // class loader. When handling the java mirror for an anonymous class
-    // we need to make sure its class loader data is claimed, this is done
-    // by calling follow_class_loader explicitly. For non-anonymous classes
-    // the call to follow_class_loader is made when the class loader itself
-    // is handled.
-    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
-      MarkSweep::follow_class_loader(klass->class_loader_data());
-    } else {
-      MarkSweep::follow_klass(klass);
-    }
-  } else {
-    // If klass is NULL then this a mirror for a primitive type.
-    // We don't have to follow them, since they are handled as strong
-    // roots in Universe::oops_do.
-    assert(java_lang_Class::is_primitive(obj), "Sanity check");
-  }
-
-  oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
-}
-
-void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
-
-  // We must NULL check here, since the class loader
-  // can be found before the loader data has been set up.
-  if(loader_data != NULL) {
-    MarkSweep::follow_class_loader(loader_data);
-  }
-}
-
-template <class T>
-static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
-    }
-  )
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!referent->is_gc_marked() &&
-        MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
-      // reference was discovered, referent will be traversed later
-      klass->InstanceKlass::oop_ms_follow_contents(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
-        }
-      )
-      return;
-    } else {
-      // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
-        }
-      )
-      MarkSweep::mark_and_push(referent_addr);
-    }
-  }
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  // Treat discovered as normal oop, if ref is not "active",
-  // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
-    MarkSweep::mark_and_push(discovered_addr);
-  }
-  // treat next as normal oop.  next is a link in the reference queue.
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " PTR_FORMAT, p2i(next_addr));
-    }
-  )
-  MarkSweep::mark_and_push(next_addr);
-  klass->InstanceKlass::oop_ms_follow_contents(obj);
-}
-
-void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(this, obj);
-  }
-}
-
-template <class T>
-static void oop_ms_follow_contents_specialized(oop obj, int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
-  assert(beg_index < len || len == 0, "index too large");
-
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
-
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    MarkSweep::mark_and_push<T>(e);
-  }
-
-  if (end_index < len) {
-    MarkSweep::push_objarray(a, end_index); // Push the continuation.
-  }
-}
-
-void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  MarkSweep::follow_klass(this);
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(obj, 0);
-  }
-}
-
-void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
-
-void MarkSweep::follow_array(objArrayOop array, int index) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(array, index);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(array, index);
+inline void MarkSweep::mark_object(oop obj) {
+#if INCLUDE_ALL_GCS
+  if (G1StringDedup::is_enabled()) {
+    // We must enqueue the object before it is marked
+    // as we otherwise can't read the object's age.
+    G1StringDedup::enqueue_from_mark(obj);
+  }
+#endif
+  // some marks may contain information we need to preserve so we store them away
+  // and overwrite the mark.  We'll restore it at the end of markSweep.
+  markOop mark = obj->mark();
+  obj->set_mark(markOopDesc::prototype()->set_marked());
+
+  if (mark->must_be_preserved(obj)) {
+    preserve_mark(obj, mark);
+  }
+}
+
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      _marking_stack.push(obj);
+    }
+  }
+}
+
+inline void MarkSweep::follow_klass(Klass* klass) {
+  oop op = klass->klass_holder();
+  MarkSweep::mark_and_push(&op);
+}
+
+inline void MarkSweep::follow_cld(ClassLoaderData* cld) {
+  MarkSweep::follow_cld_closure.do_cld(cld);
+}
+
+template <typename T>
+inline void MarkAndPushClosure::do_oop_nv(T* p)                 { MarkSweep::mark_and_push(p); }
+void MarkAndPushClosure::do_oop(oop* p)                         { do_oop_nv(p); }
+void MarkAndPushClosure::do_oop(narrowOop* p)                   { do_oop_nv(p); }
+inline bool MarkAndPushClosure::do_metadata_nv()                { return true; }
+bool MarkAndPushClosure::do_metadata()                          { return do_metadata_nv(); }
+inline void MarkAndPushClosure::do_klass_nv(Klass* k)           { MarkSweep::follow_klass(k); }
+void MarkAndPushClosure::do_klass(Klass* k)                     { do_klass_nv(k); }
+inline void MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) { MarkSweep::follow_cld(cld); }
+void MarkAndPushClosure::do_cld(ClassLoaderData* cld)           { do_cld_nv(cld); }
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+  mark_and_push(p);
+}
+
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack.push(task);
+}
+
+inline void MarkSweep::follow_array(objArrayOop array) {
+  MarkSweep::follow_klass(array->klass());
+  // Don't push empty arrays to avoid unnecessary work.
+  if (array->length() > 0) {
+    MarkSweep::push_objarray(array, 0);
+  }
+}
+
+inline void MarkSweep::follow_object(oop obj) {
+  assert(obj->is_gc_marked(), "should be marked");
+  if (obj->is_objArray()) {
+    // Handle object arrays explicitly to allow them to
+    // be split into chunks if needed.
+    MarkSweep::follow_array((objArrayOop)obj);
+  } else {
+    obj->oop_iterate(&mark_and_push_closure);
+  }
+}
+
+void MarkSweep::follow_array_chunk(objArrayOop array, int index) {
+  const int len = array->length();
+  const int beg_index = index;
+  assert(beg_index < len || len == 0, "index too large");
+
+  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
+  const int end_index = beg_index + stride;
+
+  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(array, end_index); // Push the continuation.
   }
 }
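The replacement code above follows objects through the generic oop_iterate closures instead of per-Klass oop_ms_follow_contents methods; object arrays are still split into strides so the marking stack stays shallow. A self-contained sketch of that chunking scheme (standard-library stand-ins, not HotSpot types):

    // Sketch only: large arrays are processed in strides, and a continuation
    // task (array, next_index) is pushed so the stack stays shallow.
    #include <algorithm>
    #include <cstdio>
    #include <stack>
    #include <utility>
    #include <vector>

    static const int kStride = 4;  // stands in for ObjArrayMarkingStride

    typedef std::pair<const std::vector<int>*, int> Task;

    void follow_array_chunk(const std::vector<int>& array, int index,
                            std::stack<Task>& tasks) {
      const int len = (int)array.size();
      const int end = std::min(index + kStride, len);
      for (int i = index; i < end; i++) {
        std::printf("visit element %d\n", array[i]);
      }
      if (end < len) {
        tasks.push(Task(&array, end));  // push the continuation
      }
    }

    int main() {
      std::vector<int> big(10);
      std::stack<Task> tasks;
      tasks.push(Task(&big, 0));
      while (!tasks.empty()) {
        Task t = tasks.top();
        tasks.pop();
        follow_array_chunk(*t.first, t.second, tasks);
      }
    }
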
@@ -233,7 +167,7 @@ void MarkSweep::follow_stack() {
     // Process ObjArrays one at a time to avoid marking stack bloat.
     if (!_objarray_stack.is_empty()) {
       ObjArrayTask task = _objarray_stack.pop();
-      follow_array(objArrayOop(task.obj()), task.index());
+      follow_array_chunk(objArrayOop(task.obj()), task.index());
     }
   } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
 }
@@ -242,6 +176,24 @@ MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
 
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
+template <class T> inline void MarkSweep::follow_root(T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      follow_object(obj);
+    }
+  }
+  follow_stack();
+}
+
+void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
+
 void PreservedMark::adjust_pointer() {
   MarkSweep::adjust_pointer(&_obj);
 }
@@ -266,6 +218,11 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
   }
 }
 
+void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
+  _ref_processor = rp;
+  mark_and_push_closure.set_ref_processor(_ref_processor);
+}
+
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
 template <typename T>
@@ -405,3 +362,6 @@ int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
   // know that Universe::TypeArrayKlass never moves.
   return t->object_size();
 }
+
+// Generate MS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)

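SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS plugs MarkAndPushClosure into the existing X-macro machinery, which stamps out one statically bound oop_oop_iterate definition per closure type. A toy version of the scheme (illustrative names only, not the HotSpot macros):

    // Sketch only: a list macro applies a definition macro to each
    // (closure, suffix) pair, generating one function per closure type.
    #include <cstdio>

    struct MarkAndPushClosure { void apply(int v) { std::printf("mark %d\n", v); } };

    #define ALL_CLOSURES(f) \
      f(MarkAndPushClosure, _nv)

    #define DEFINE_ITERATE(ClosureType, suffix)              \
      void iterate##suffix(int* p, int n, ClosureType* cl) { \
        for (int i = 0; i < n; i++) cl->apply(p[i]);         \
      }

    ALL_CLOSURES(DEFINE_ITERATE)  // expands to: void iterate_nv(...)

    int main() {
      int data[2] = {7, 8};
      MarkAndPushClosure cl;
      iterate_nv(data, 2, &cl);
    }
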
@@ -49,6 +49,7 @@ class STWGCTimer;
 
 // declared at end
 class PreservedMark;
+class MarkAndPushClosure;
 
 class MarkSweep : AllStatic {
   //
@@ -60,13 +61,6 @@ class MarkSweep : AllStatic {
     virtual void do_oop(narrowOop* p);
   };
 
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   public:
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -146,6 +140,7 @@ class MarkSweep : AllStatic {
 
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+  static void set_ref_processor(ReferenceProcessor* rp);
 
   // Archive Object handling
   static inline bool is_archive_object(oop object);
@@ -153,34 +148,55 @@ class MarkSweep : AllStatic {
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
-  // Call backs for marking
-  static void mark_object(oop obj);
-  // Mark pointer and follow contents.  Empty marking stack afterwards.
-  template <class T> static inline void follow_root(T* p);
-
-  // Check mark and maybe push on marking stack
-  template <class T> static void mark_and_push(T* p);
-
-  static inline void push_objarray(oop obj, size_t index);
-
-  static void follow_stack();   // Empty marking stack.
-
-  static void follow_object(oop obj);
-
-  static void follow_array(objArrayOop array, int index);
-
-  static void follow_klass(Klass* klass);
-
-  static void follow_class_loader(ClassLoaderData* cld);
-
-  static int adjust_pointers(oop obj);
-
   static void preserve_mark(oop p, markOop mark);
                                 // Save the mark word so it can be restored later
   static void adjust_marks();   // Adjust the pointers in the preserved marks table
   static void restore_marks();  // Restore the marks that we saved in preserve_mark
 
+  static int adjust_pointers(oop obj);
+
+  static void follow_stack();   // Empty marking stack.
+
+  static void follow_klass(Klass* klass);
+
+  static void follow_cld(ClassLoaderData* cld);
+
   template <class T> static inline void adjust_pointer(T* p);
+
+  // Check mark and maybe push on marking stack
+  template <class T> static void mark_and_push(T* p);
+
+ private:
+  // Call backs for marking
+  static void mark_object(oop obj);
+  // Mark pointer and follow contents.  Empty marking stack afterwards.
+  template <class T> static inline void follow_root(T* p);
+
+  static inline void push_objarray(oop obj, size_t index);
+
+  static void follow_object(oop obj);
+
+  static void follow_array(objArrayOop array);
+
+  static void follow_array_chunk(objArrayOop array, int index);
+};
+
+class MarkAndPushClosure: public ExtendedOopClosure {
+public:
+  template <typename T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+  virtual bool do_metadata();
+  bool do_metadata_nv();
+
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
+
+  virtual void do_cld(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+
+  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
+};
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {

@@ -26,38 +26,13 @@
 #define SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
 
 #include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "oops/instanceClassLoaderKlass.inline.hpp"
-#include "oops/instanceKlass.inline.hpp"
-#include "oops/instanceMirrorKlass.inline.hpp"
-#include "oops/instanceRefKlass.inline.hpp"
+#include "memory/universe.hpp"
+#include "oops/markOop.inline.hpp"
-#include "oops/objArrayKlass.inline.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 #include "oops/oop.inline.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
-inline void MarkSweep::mark_object(oop obj) {
-#if INCLUDE_ALL_GCS
-  if (G1StringDedup::is_enabled()) {
-    // We must enqueue the object before it is marked
-    // as we otherwise can't read the object's age.
-    G1StringDedup::enqueue_from_mark(obj);
-  }
-#endif
-  // some marks may contain information we need to preserve so we store them away
-  // and overwrite the mark.  We'll restore it at the end of markSweep.
-  markOop mark = obj->mark();
-  obj->set_mark(markOopDesc::prototype()->set_marked());
-
-  if (mark->must_be_preserved(obj)) {
-    preserve_mark(obj, mark);
-  }
-}
-
 inline bool MarkSweep::is_archive_object(oop object) {
 #if INCLUDE_ALL_GCS
   return (G1MarkSweep::archive_check_enabled() &&
@@ -67,51 +42,6 @@ inline bool MarkSweep::is_archive_object(oop object) {
 #endif
 }
 
-inline void MarkSweep::follow_klass(Klass* klass) {
-  oop op = klass->klass_holder();
-  MarkSweep::mark_and_push(&op);
-}
-
-inline void MarkSweep::follow_object(oop obj) {
-  assert(obj->is_gc_marked(), "should be marked");
-
-  obj->ms_follow_contents();
-}
-
-template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      follow_object(obj);
-    }
-  }
-  follow_stack();
-}
-
-template <class T> inline void MarkSweep::mark_and_push(T* p) {
-//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      _marking_stack.push(obj);
-    }
-  }
-}
-
-void MarkSweep::push_objarray(oop obj, size_t index) {
-  ObjArrayTask task(obj, index);
-  assert(task.is_valid(), "bad ObjArrayTask");
-  _objarray_stack.push(task);
-}
-
 inline int MarkSweep::adjust_pointers(oop obj) {
   return obj->ms_adjust_pointers();
 }
@@ -139,8 +69,4 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
   }
 }
 
-template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(p);
-}
-
 #endif // SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP

@@ -237,7 +237,7 @@ void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,       \
                                                    HeapWord* bottom,   \
                                                    HeapWord* top,      \
                                                    ClosureType* cl) {  \
-  bottom += oop(bottom)->oop_iterate(cl, mr);                          \
+  bottom += oop(bottom)->oop_iterate_size(cl, mr);                     \
   if (bottom < top) {                                                  \
     HeapWord* next_obj = bottom + oop(bottom)->size();                 \
     while (next_obj < top) {                                           \
@@ -508,7 +508,7 @@ bool Space::obj_is_alive(const HeapWord* p) const {
     HeapWord* t = mr.end();                                            \
     while (obj_addr < t) {                                             \
       assert(oop(obj_addr)->is_oop(), "Should be an oop");             \
-      obj_addr += oop(obj_addr)->oop_iterate(blk);                     \
+      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                \
     }                                                                  \
   }
 
@@ -523,7 +523,7 @@ void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
   HeapWord* t = top();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(blk);
+    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
   }
 }
 
@@ -578,7 +578,7 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {  \
       Prefetch::write(p, interval);                             \
       debug_only(HeapWord* prev = p);                           \
       oop m = oop(p);                                           \
-      p += m->oop_iterate(blk);                                 \
+      p += m->oop_iterate_size(blk);                            \
     }                                                           \
   } while (t < top());                                          \
                                                                 \

@@ -42,6 +42,8 @@ class OopsInGenClosure;
 class ScanClosure;
 class FastScanClosure;
 class FilteringClosure;
+// MarkSweep
+class MarkAndPushClosure;
 // ParNew
 class ParScanWithBarrierClosure;
 class ParScanWithoutBarrierClosure;
@@ -87,6 +89,9 @@ class NoHeaderExtendedOopClosure;
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)        \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f) \
+  f(MarkAndPushClosure,_nv)
+
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f) \
   f(MarkRefsIntoAndScanClosure,_nv)                 \
@@ -101,10 +106,12 @@ class NoHeaderExtendedOopClosure;
 
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)  \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)       \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)      \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)
 #else // INCLUDE_ALL_GCS
-#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)  \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)
 #endif // INCLUDE_ALL_GCS
 
 

@@ -258,7 +258,7 @@ void ParallelTaskTerminator::reset_for_reuse() {
 
 #ifdef ASSERT
 bool ObjArrayTask::is_valid() const {
-  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
+  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
          _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT

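The assertion change above is a small but real fix: whole arrays are now pushed as (array, 0) tasks, so index 0 must count as valid. A minimal model:

    // Sketch only: index 0 is a legal chunk start, so validity must
    // test index >= 0, not index > 0.
    #include <cassert>

    bool is_valid(int index, int length) {
      return index >= 0 && index < length;   // was: index > 0, rejecting 0
    }

    int main() {
      assert(is_valid(0, 10));    // first chunk of an array
      assert(!is_valid(-1, 10));
      assert(!is_valid(10, 10));
    }
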
@@ -61,7 +61,7 @@ class ExtendedOopClosure : public OopClosure {
   //
   // 1) do_klass on the header klass pointer.
   // 2) do_klass on the klass pointer in the mirrors.
-  // 3) do_class_loader_data on the class loader data in class loaders.
+  // 3) do_cld on the class loader data in class loaders.
   //
   // The virtual (without suffix) and the non-virtual (with _nv suffix) need
   // to be updated together, or else the devirtualization will break.
@@ -71,13 +71,14 @@ class ExtendedOopClosure : public OopClosure {
   // ExtendedOopClosures that don't need to walk the metadata.
   // Currently, only CMS and G1 need these.
 
-  virtual bool do_metadata() { return do_metadata_nv(); }
   bool do_metadata_nv()      { return false; }
+  virtual bool do_metadata() { return do_metadata_nv(); }
 
-  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
   void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
+  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
+  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 
   // True iff this closure may be safely applied more than once to an oop
   // location without an intervening "major reset" (like the end of a GC).
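The reordered declarations keep the rule stated in the comment: virtual and _nv forms must agree. do_metadata acts as a gate — only closures that return true from it receive the do_klass/do_cld callbacks. A standalone sketch (invented types, not HotSpot code):

    // Sketch only: iterators query do_metadata before the metadata callbacks.
    #include <cstdio>

    struct Klass {};

    struct Closure {
      bool do_metadata_nv() { return false; }
      virtual bool do_metadata() { return do_metadata_nv(); }
      virtual void do_klass(Klass*) {}
    };

    struct MetadataAwareClosure : Closure {
      bool do_metadata_nv() { return true; }       // non-virtual twin
      virtual bool do_metadata() { return true; }  // must agree with the twin
      virtual void do_klass(Klass*) { std::printf("visited klass\n"); }
    };

    void iterate(Closure* cl, Klass* k) {
      if (cl->do_metadata()) {   // gate the metadata walk
        cl->do_klass(k);
      }
    }

    int main() {
      Klass k;
      MetadataAwareClosure cl;
      iterate(&cl, &k);
    }
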
@@ -180,13 +181,14 @@ class MetadataAwareOopClosure: public ExtendedOopClosure {
     _klass_closure.initialize(this);
   }
 
-  bool do_metadata_nv()      { return true; }
   virtual bool do_metadata() { return do_metadata_nv(); }
+  inline bool do_metadata_nv() { return true; }
 
-  virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 };
 
 // ObjectClosure is used for iterating through an object space
@@ -370,6 +372,7 @@ template <> class Devirtualizer<true> {
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
 };
 
@@ -378,6 +381,7 @@ template <> class Devirtualizer<false> {
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
 };
 

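Devirtualizer<nv> is the dispatcher the *.inline.hpp files use: the bool template parameter picks the statically bound _nv call or the virtual one at compile time. An illustrative re-creation (not the HotSpot declarations):

    // Sketch only: two specializations select the call path at compile time.
    #include <cstdio>

    struct Data {};

    struct Closure {
      void do_cld_nv(Data*) { std::printf("nv path\n"); }
      virtual void do_cld(Data* d) { do_cld_nv(d); }
    };

    template <bool nv> struct Devirtualizer;

    template <> struct Devirtualizer<true> {
      template <class ClosureType>
      static void do_cld(ClosureType* cl, Data* d) { cl->do_cld_nv(d); }
    };

    template <> struct Devirtualizer<false> {
      template <class ClosureType>
      static void do_cld(ClosureType* cl, Data* d) { cl->do_cld(d); }
    };

    int main() {
      Closure cl;
      Data d;
      Devirtualizer<true>::do_cld(&cl, &d);   // resolved statically
      Devirtualizer<false>::do_cld(&cl, &d);  // goes through the vtable
    }
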
@@ -36,7 +36,7 @@
 #include "oops/typeArrayKlass.inline.hpp"
 #include "utilities/debug.hpp"
 
-inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
@@ -45,11 +45,9 @@ inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
 
 inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 
-inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); }
-
 #ifdef ASSERT
 // This verification is applied to all visited oops.
 // The closures can turn is off by overriding should_verify_oops().
@@ -78,6 +76,10 @@ inline void Devirtualizer<true>::do_klass(OopClosureType* closure, Klass* k) {
   closure->do_klass_nv(k);
 }
 template <class OopClosureType>
+void Devirtualizer<true>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld_nv(cld);
+}
+template <class OopClosureType>
 inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
   // Make sure the non-virtual and the virtual versions match.
   assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
@@ -96,6 +98,10 @@ void Devirtualizer<false>::do_klass(OopClosureType* closure, Klass* k) {
   closure->do_klass(k);
 }
 template <class OopClosureType>
+void Devirtualizer<false>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld(cld);
+}
+template <class OopClosureType>
 bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
   return closure->do_metadata();
 }

@@ -148,12 +148,12 @@ class ArrayKlass: public Klass {
 // Used to generate the declarations in the *ArrayKlass header files.
 
 #define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)  \
-  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
+  void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
 #if INCLUDE_ALL_GCS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
 #define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)  \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
@@ -162,15 +162,15 @@ class ArrayKlass: public Klass {
 
 #define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                  \
                                                                                                           \
-int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {   \
-  return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                         \
+void KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
+  oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
 #if INCLUDE_ALL_GCS
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {   \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
   /* No reverse implementation ATM. */                                                    \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                           \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                                  \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)

@@ -51,7 +51,6 @@ public:
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -71,19 +70,19 @@ class InstanceClassLoaderKlass: public InstanceKlass {
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 #if INCLUDE_ALL_GCS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 

@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
 
 #include "classfile/javaClasses.hpp"
+#include "memory/iterator.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -34,48 +35,42 @@
 #include "utilities/macros.hpp"
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
     // cld can be null if we have a non-registered class loader.
     if (cld != NULL) {
-      closure->do_class_loader_data(cld);
+      Devirtualizer<nv>::do_cld(closure, cld);
     }
   }
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
-
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     if (mr.contains(obj)) {
       ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
       // cld can be null if we have a non-registered class loader.
       if (cld != NULL) {
-        closure->do_class_loader_data(cld);
+        Devirtualizer<nv>::do_cld(closure, cld);
       }
     }
   }
-
-  return size;
 }
 
 #define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \

@@ -1014,7 +1014,6 @@ class InstanceKlass: public Klass {
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge

@@ -38,6 +38,8 @@
 // as the previous macro based implementation.
 #ifdef TARGET_COMPILER_visCPP
 #define INLINE __forceinline
+#elif defined(TARGET_COMPILER_sparcWorks)
+#define INLINE __attribute__((always_inline))
 #else
 #define INLINE inline
 #endif

@@ -91,7 +91,6 @@ class InstanceMirrorKlass: public InstanceKlass {
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -121,21 +120,21 @@ class InstanceMirrorKlass: public InstanceKlass {
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over the static fields.
   template <bool nv, class OopClosureType>

@@ -53,30 +53,40 @@ void InstanceMirrorKlass::oop_oop_iterate_statics(oop obj, OopClosureType* closu
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Klass* klass = java_lang_Class::as_Klass(obj);
     // We'll get NULL for primitive mirrors.
     if (klass != NULL) {
+      if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+        // An anonymous class doesn't have its own class loader, so when handling
+        // the java mirror for an anonymous class we need to make sure its class
+        // loader data is claimed, this is done by calling do_cld explicitly.
+        // For non-anonymous classes the call to do_cld is made when the class
+        // loader itself is handled.
+        Devirtualizer<nv>::do_cld(closure, klass->class_loader_data());
+      } else {
       Devirtualizer<nv>::do_klass(closure, klass);
+      }
+    } else {
+      // If klass is NULL then this a mirror for a primitive type.
+      // We don't have to follow them, since they are handled as strong
+      // roots in Universe::oops_do.
+      assert(java_lang_Class::is_primitive(obj), "Sanity check");
     }
   }
 
   oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 #endif
 
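The anonymous-class branch moved here from the old oop_ms_follow_contents: an anonymous class's mirror must route to do_cld so its class loader data gets claimed exactly once per cycle. A toy model of the claiming idea (all names invented, not HotSpot code):

    // Sketch only: each class-loader-data record is visited at most once.
    #include <cstdio>

    struct ClassLoaderData {
      bool claimed = false;
      bool try_claim() {                 // first caller wins
        if (claimed) return false;
        claimed = true;
        return true;
      }
    };

    void do_cld(ClassLoaderData* cld) {
      if (cld->try_claim()) {
        std::printf("walking CLD once\n");
      }
    }

    int main() {
      ClassLoaderData cld;
      do_cld(&cld);  // walks
      do_cld(&cld);  // already claimed; skipped
    }
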
@@ -115,7 +125,7 @@ void InstanceMirrorKlass::oop_oop_iterate_statics_bounded(oop obj, OopClosureTyp
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
@@ -129,8 +139,6 @@ void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closur
   }
 
   oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
-
-  return oop_size(obj);
 }
 
 #define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \

@@ -67,7 +67,6 @@ class InstanceRefKlass: public InstanceKlass {
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -88,19 +87,19 @@ class InstanceRefKlass: public InstanceKlass {
  private:
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Reference processing part of the iterators.
 

@@ -106,37 +106,27 @@ void InstanceRefKlass::oop_oop_iterate_ref_processing_bounded(oop obj, OopClosur
 }
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::
-oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
-
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+void InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
-
-  return size;
 }
 
 // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for

@@ -572,7 +572,6 @@ protected:
   // GC specific object visitors
   //
   // Mark Sweep
-  virtual void oop_ms_follow_contents(oop obj) = 0;
   virtual int  oop_ms_adjust_pointers(oop obj) = 0;
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -585,16 +584,16 @@ protected:
   // Iterators specialized to particular subtypes
   // of ExtendedOopClosure, to avoid closure virtual calls.
 #define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                          \
-  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
+  virtual void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                       \
   /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                    \
-  virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
+  virtual void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
 #define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                   \
-  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
+  virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
@@ -661,12 +660,12 @@ protected:
 // Used to generate declarations in the *Klass header files.
 
 #define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                     \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                         \
-  int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
+  void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
+  void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
 #if INCLUDE_ALL_GCS
 #define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)       \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
@@ -674,22 +673,22 @@ protected:
 // Used to generate definitions in the *Klass.inline.hpp files.
 
 #define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                \
+void KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                       \
 }
 
 #if INCLUDE_ALL_GCS
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                  \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                         \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)
 #endif
 
 #define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                            \
-int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {   \
-  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                           \
+void KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
+  oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                                  \
 }
 
 #endif // SHARE_VM_OOPS_KLASS_HPP

@@ -105,7 +105,6 @@ class ObjArrayKlass : public ArrayKlass {
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -125,15 +124,15 @@ class ObjArrayKlass : public ArrayKlass {
 
   // Iterate over oop elements and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Iterate over oop elements within mr, and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over oop elements with indices within [start, end), and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
+  inline void oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
 
   // Iterate over oop elements within [start, end), and metadata.
   // Specialized for [T = oop] or [T = narrowOop].

@ -85,46 +85,31 @@ void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureTy
}

template <bool nv, typename OopClosureType>
int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
assert (obj->is_array(), "obj must be array");
objArrayOop a = objArrayOop(obj);

// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call.
int size = a->object_size();
if (Devirtualizer<nv>::do_metadata(closure)) {
Devirtualizer<nv>::do_klass(closure, obj->klass());
}

oop_oop_iterate_elements<nv>(a, closure);

return size;
}

template <bool nv, typename OopClosureType>
int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
assert(obj->is_array(), "obj must be array");
objArrayOop a = objArrayOop(obj);

// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call
int size = a->object_size();

if (Devirtualizer<nv>::do_metadata(closure)) {
Devirtualizer<nv>::do_klass(closure, a->klass());
}

oop_oop_iterate_elements_bounded<nv>(a, closure, mr);

return size;
}

template <bool nv, typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
if (Devirtualizer<nv>::do_metadata(closure)) {
Devirtualizer<nv>::do_klass(closure, a->klass());
}

T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start);
T* high = (T*)a->base() + end;

@ -134,21 +119,15 @@ void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureT
// Like oop_oop_iterate but only iterates over a specified range and only used
// for objArrayOops.
template <bool nv, class OopClosureType>
int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
void ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
assert(obj->is_array(), "obj must be array");
objArrayOop a = objArrayOop(obj);

// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call
int size = a->object_size();

if (UseCompressedOops) {
oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end);
} else {
oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end);
}

return size;
}
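
oop_oop_iterate_range above tests UseCompressedOops once and then commits to a template instantiation in which the element width is fixed at compile time, so the inner loop carries no per-element branch. A hedged sketch of that dispatch shape, with made-up element types standing in for oop and narrowOop:

```cpp
#include <cstdint>
#include <iostream>

// Stand-in for the UseCompressedOops flag; in HotSpot it is fixed at
// VM startup, so the branch below runs once per iteration call.
static bool use_compressed_refs = true;

// One template walk over [start, end); T is either a full pointer-width
// word or a 32-bit compressed reference, decided at instantiation time.
template <typename T>
void visit_range(T* base, int start, int end) {
  T* low  = base + start;
  T* high = base + end;   // one past the last element, as in [start, end)
  for (T* p = low; p < high; ++p) {
    std::cout << static_cast<uint64_t>(*p) << ' ';
  }
  std::cout << '\n';
}

int main() {
  uint32_t narrow[4] = {1, 2, 3, 4};   // "narrowOop"-like elements
  uint64_t wide[4]   = {5, 6, 7, 8};   // "oop"-like elements
  if (use_compressed_refs) {
    visit_range<uint32_t>(narrow, 1, 3);  // prints: 2 3
  } else {
    visit_range<uint64_t>(wide, 1, 3);
  }
  return 0;
}
```
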

#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \

@ -46,8 +46,8 @@ oop objArrayOopDesc::atomic_compare_exchange_oop(int index, oop exchange_value,

#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
return ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
void objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
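
This macro also shows the double dispatch the patch keeps intact: the object-side oop_iterate_range consults the object's klass and forwards to the klass-side walker that understands the layout. A small sketch of that shape under invented names:

```cpp
#include <iostream>

struct ToyObj;

// Klass-side: one walker per object layout.
struct ToyKlass {
  virtual void iterate(ToyObj* obj) = 0;
  virtual ~ToyKlass() {}
};

// Object-side: mirrors objArrayOopDesc::oop_iterate_range forwarding
// to ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(...).
struct ToyObj {
  ToyKlass* klass;
  void iterate() { klass->iterate(this); }
};

struct ArrayKlassLike : ToyKlass {
  virtual void iterate(ToyObj* obj) {
    std::cout << "array-layout walk of object " << obj << '\n';
  }
};

int main() {
  ArrayKlassLike k;
  ToyObj obj = { &k };
  obj.iterate();  // the klass, not the object, decides how to walk
  return 0;
}
```
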
@ -106,7 +106,7 @@ private:

// special iterators for index ranges, returns size of object
#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_iterate_range(OopClosureType* blk, int start, int end);
void oop_iterate_range(OopClosureType* blk, int start, int end);

ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DECL)

@ -330,7 +330,6 @@ class oopDesc {

// Garbage Collection support

// Mark Sweep
void ms_follow_contents();
// Adjust all pointers in this object to point at its forwarded location and
// return the size of this oop. This is used by the MarkSweep collector.
int ms_adjust_pointers();

@ -345,16 +344,24 @@ class oopDesc {

// iterators, returns size of object
#define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_iterate(OopClosureType* blk); \
int oop_iterate(OopClosureType* blk, MemRegion mr); // Only in mr.
void oop_iterate(OopClosureType* blk); \
void oop_iterate(OopClosureType* blk, MemRegion mr); // Only in mr.

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL)

#define OOP_ITERATE_SIZE_DECL(OopClosureType, nv_suffix) \
int oop_iterate_size(OopClosureType* blk); \
int oop_iterate_size(OopClosureType* blk, MemRegion mr); // Only in mr.

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_SIZE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_SIZE_DECL)

#if INCLUDE_ALL_GCS

#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
int oop_iterate_backwards(OopClosureType* blk);
void oop_iterate_backwards(OopClosureType* blk);

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL)
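
This hunk is the heart of the cleanup: plain oop_iterate is now void, and callers that still need the object's size use the new oop_iterate_size, which must capture the size before the closure runs. A toy illustration of why that ordering matters; the types are invented for the example, but the hazard is the real one (in HotSpot the size is derived from the klass reached through the object's header, which a moving collector may overwrite with a forwarding pointer):

```cpp
#include <cassert>

struct ToyKlass { int instance_size; };
struct ToyObj   { void* header; };  // normally points at the ToyKlass

int size_given_klass(ToyKlass* k) { return k->instance_size; }

// A closure that, like some GC closures, forwards the object and
// clobbers the header as a side effect.
struct ForwardingClosure {
  void do_object(ToyObj* obj) { obj->header = obj; }
};

// Mirrors the oop_iterate_size shape: read the klass and compute the
// size *before* applying the closure; afterwards the header may no
// longer point at a klass at all.
int iterate_size(ToyObj* obj, ForwardingClosure* cl) {
  ToyKlass* k = static_cast<ToyKlass*>(obj->header);
  int size = size_given_klass(k);  // safe: header still intact
  cl->do_object(obj);              // may overwrite obj->header
  return size;
}

int main() {
  ToyKlass k = {24};
  ToyObj obj = {&k};
  ForwardingClosure cl;
  assert(iterate_size(&obj, &cl) == 24);
  return 0;
}
```
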
@ -695,10 +695,6 @@ inline intptr_t oopDesc::identity_hash() {
}
}

inline void oopDesc::ms_follow_contents() {
klass()->oop_ms_follow_contents(this);
}

inline int oopDesc::ms_adjust_pointers() {
debug_only(int check_size = size());
int s = klass()->oop_ms_adjust_pointers(this);

@ -732,32 +728,48 @@ inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
inline int oopDesc::oop_iterate(OopClosureType* blk) { \
return klass()->oop_oop_iterate##nv_suffix(this, blk); \
inline void oopDesc::oop_iterate(OopClosureType* blk) { \
klass()->oop_oop_iterate##nv_suffix(this, blk); \
} \
\
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
return klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr); \
inline void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr); \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
\
inline int oopDesc::oop_iterate_size(OopClosureType* blk) { \
Klass* k = klass(); \
int size = size_given_klass(k); \
k->oop_oop_iterate##nv_suffix(this, blk); \
return size; \
} \
\
inline int oopDesc::oop_iterate_size(OopClosureType* blk, \
MemRegion mr) { \
Klass* k = klass(); \
int size = size_given_klass(k); \
k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr); \
return size; \
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
// The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
// the do_oop calls, but turns off all other features in ExtendedOopClosure.
NoHeaderExtendedOopClosure cl(blk);
return oop_iterate(&cl);
return oop_iterate_size(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
NoHeaderExtendedOopClosure cl(blk);
return oop_iterate(&cl, mr);
return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
\
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

@ -765,6 +777,7 @@ inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix) \
OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
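
oop_iterate_no_header above leans on a wrapper closure that forwards only the do_oop calls and answers "no" to everything else. A minimal decorator sketch of that idea, assuming invented closure types rather than HotSpot's ExtendedOopClosure hierarchy:

```cpp
#include <iostream>

// Stand-in for an "extended" closure: it can visit reference slots and
// optionally asks for metadata (klass/CLD) processing as well.
struct ExtendedClosureLike {
  virtual void do_oop(void** p) = 0;
  virtual bool do_metadata() { return true; }
  virtual ~ExtendedClosureLike() {}
};

struct PrintClosure : ExtendedClosureLike {
  virtual void do_oop(void** p) { std::cout << "visited slot " << p << '\n'; }
};

// Same shape as NoHeaderExtendedOopClosure: proxy do_oop to the wrapped
// closure, switch every other feature off.
struct NoHeaderWrapper : ExtendedClosureLike {
  ExtendedClosureLike* wrapped;
  explicit NoHeaderWrapper(ExtendedClosureLike* w) : wrapped(w) {}
  virtual void do_oop(void** p) { wrapped->do_oop(p); }
  virtual bool do_metadata() { return false; }  // callers skip metadata work
};

int main() {
  void* slot = 0;
  PrintClosure print;
  NoHeaderWrapper no_header(&print);
  if (!no_header.do_metadata()) {
    std::cout << "metadata processing disabled\n";
  }
  no_header.do_oop(&slot);  // oop callbacks still reach the real closure
  return 0;
}
```
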
@ -75,7 +75,6 @@ class TypeArrayKlass : public ArrayKlass {
// GC specific object visitors
//
// Mark Sweep
void oop_ms_follow_contents(oop obj);
int oop_ms_adjust_pointers(oop obj);
#if INCLUDE_ALL_GCS
// Parallel Scavenge

@ -90,15 +89,15 @@ class TypeArrayKlass : public ArrayKlass {

private:
// The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
inline void oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);

// Wraps oop_oop_iterate_impl to conform to macros.
template <bool nv, typename OopClosureType>
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
inline void oop_oop_iterate(oop obj, OopClosureType* closure);

// Wraps oop_oop_iterate_impl to conform to macros.
template <bool nv, typename OopClosureType>
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);

public:

@ -33,22 +33,20 @@

class ExtendedOopClosure;

inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
inline void TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
assert(obj->is_typeArray(),"must be a type array");
typeArrayOop t = typeArrayOop(obj);
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::TypeArrayKlass never moves.
return t->object_size();
}

template <bool nv, typename OopClosureType>
int TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
return oop_oop_iterate_impl(obj, closure);
void TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
oop_oop_iterate_impl(obj, closure);
}

template <bool nv, typename OopClosureType>
int TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
return oop_oop_iterate_impl(obj, closure);
void TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
oop_oop_iterate_impl(obj, closure);
}
}
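
For type arrays the walk is intentionally empty of visits: the elements are primitives, and the klass pointer is skipped because Universe::TypeArrayKlass is assumed never to move, so under the old signature the method's only real work was reporting the size. A toy rendering of that, with illustrative names:

```cpp
#include <cassert>

// Stand-in closure that would visit reference slots, if there were any.
struct RefClosure {
  virtual void do_ref(void** p) = 0;
  virtual ~RefClosure() {}
};

struct ToyIntArray {
  int length;
  int data[8];  // primitive payload only: nothing for a GC closure to see
};

// Old shape: return the size, visit nothing. After this patch the body
// becomes void and callers obtain the size through a separate entry point.
inline int iterate_refs_and_size(ToyIntArray* a, RefClosure* /*cl*/) {
  // No reference fields, and the klass is assumed immovable, so the
  // closure is deliberately never invoked.
  return (int)sizeof(*a);
}

int main() {
  ToyIntArray a = {8, {0}};
  assert(iterate_refs_and_size(&a, 0) == (int)sizeof(ToyIntArray));
  return 0;
}
```
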
#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
@ -27,6 +27,17 @@

#include "utilities/stack.hpp"

// Stack is used by the GC code and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, no further inlining is allowed by GCC. Therefore we need
// to prevent parts of the slow path in Stack from being inlined to allow
// other code to be.
#if defined(TARGET_COMPILER_gcc)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
size_t max_size):
_seg_size(segment_size),

@ -141,7 +152,7 @@ void Stack<E, F>::free(E* addr, size_t bytes)
}

template <class E, MEMFLAGS F>
void Stack<E, F>::push_segment()
NOINLINE void Stack<E, F>::push_segment()
{
assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
E* next;

@ -269,4 +280,6 @@ E* StackIterator<E, F>::next_addr()
return _cur_seg + --_cur_seg_size;
}

#undef NOINLINE

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
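
The NOINLINE annotation above keeps the rarely taken segment-allocation slow path out of line, preserving GCC's inlining budget for the hot callers around it. A standalone sketch of the same trick; the compiler guard mirrors the patch's macro, while the buffer code is invented for illustration:

```cpp
#include <cstdio>
#include <cstdlib>

#if defined(__GNUC__)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

static int*   buf = 0;
static size_t len = 0;
static size_t cap = 0;

// Slow path: grows the buffer. Marked NOINLINE so its body is not
// dragged into every caller of push(), keeping push() cheap to inline.
NOINLINE static void grow() {
  cap = cap ? cap * 2 : 16;
  buf = static_cast<int*>(realloc(buf, cap * sizeof(int)));
}

// Hot path: one compare and one store in the common case.
static inline void push(int v) {
  if (len == cap) grow();  // rare
  buf[len++] = v;
}

int main() {
  for (int i = 0; i < 1000; i++) push(i);
  printf("%d\n", buf[999]);  // 999
  free(buf);
  return 0;
}
```
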