From 4357348f6f41d2cac33cfa166dadcf7e2a32b968 Mon Sep 17 00:00:00 2001
From: Stefan Karlsson
Date: Wed, 22 Apr 2015 08:29:39 +0200
Subject: [PATCH 01/19] 8078340: Remove the unused PSParallelCompact::KeepAliveClosure

Reviewed-by: ehelin, tschatzl
---
 .../parallelScavenge/psParallelCompact.cpp |  3 ---
 .../parallelScavenge/psParallelCompact.hpp | 17 -----------------
 2 files changed, 20 deletions(-)

diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 7ef05d16a62..2fe64102aa9 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -820,9 +820,6 @@ PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 
 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 
-void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
-void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
-
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 1daa065e92e..6998df9970b 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -933,17 +933,6 @@ class PSParallelCompact : AllStatic {
     virtual bool do_object_b(oop p);
   };
 
-  class KeepAliveClosure: public OopClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   protected:
-    template <class T> inline void do_oop_work(T* p);
-   public:
-    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
   class FollowStackClosure: public VoidClosure {
    private:
     ParCompactionManager* _compaction_manager;
@@ -967,7 +956,6 @@ class PSParallelCompact : AllStatic {
     void do_klass(Klass* klass);
   };
 
-  friend class KeepAliveClosure;
   friend class FollowStackClosure;
   friend class AdjustPointerClosure;
   friend class AdjustKlassClosure;
@@ -1337,11 +1325,6 @@ inline bool PSParallelCompact::is_marked(oop obj) {
   return mark_bitmap()->is_marked(obj);
 }
 
-template <class T>
-inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(_compaction_manager, p);
-}
-
 inline bool PSParallelCompact::print_phases() {
   return _print_phases;
 }

From 9631881fe3bfb27519c4387082e6490dccc29ebb Mon Sep 17 00:00:00 2001
From: Stefan Karlsson
Date: Wed, 22 Apr 2015 09:09:42 +0200
Subject: [PATCH 02/19] 8078341: Remove the unused PSParallelCompact::_updated_int_array_klass_obj

Reviewed-by: ehelin, tschatzl
---
 .../parallelScavenge/psParallelCompact.cpp | 1 -
 .../parallelScavenge/psParallelCompact.hpp | 6 ------
 2 files changed, 7 deletions(-)

diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 2fe64102aa9..212de5899cf 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -108,7 +108,6 @@ SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 bool   PSParallelCompact::_print_phases = false;
 
 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
-Klass* PSParallelCompact::_updated_int_array_klass_obj = NULL;
 
 double PSParallelCompact::_dwl_mean;
 double PSParallelCompact::_dwl_std_dev;
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 6998df9970b..6af90b299dc 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -982,9 +982,6 @@ class PSParallelCompact : AllStatic {
   // Reference processing (used in ...follow_contents)
   static ReferenceProcessor* _ref_processor;
 
-  // Updated location of intArrayKlassObj.
-  static Klass* _updated_int_array_klass_obj;
-
   // Values computed at initialization and used by dead_wood_limiter().
   static double _dwl_mean;
   static double _dwl_std_dev;
@@ -1181,9 +1178,6 @@ class PSParallelCompact : AllStatic {
   // Used to add tasks
   static GCTaskManager* const gc_task_manager();
 
-  static Klass* updated_int_array_klass_obj() {
-    return _updated_int_array_klass_obj;
-  }
 
   // Marking support
   static inline bool mark_obj(oop obj);

From 7d01ef4a6e431665129736df44d72350b9d0a8d8 Mon Sep 17 00:00:00 2001
From: Stefan Karlsson
Date: Wed, 22 Apr 2015 10:31:15 +0200
Subject: [PATCH 03/19] 8078345: Move PSParallelCompact::mark_and_push to ParCompactionManager

Reviewed-by: jwilhelm, brutisso
---
 .../parallelScavenge/pcTasks.cpp             | 12 ++---
 .../parallelScavenge/psCompactionManager.cpp | 20 ++++----
 .../parallelScavenge/psCompactionManager.hpp | 42 ++++++++++++++++-
 .../psCompactionManager.inline.hpp           | 47 ++++++++++++++++++-
 .../parallelScavenge/psParallelCompact.cpp   | 17 +------
 .../parallelScavenge/psParallelCompact.hpp   | 45 +-----------------
 .../psParallelCompact.inline.hpp             | 27 -----------
 7 files changed, 106 insertions(+), 104 deletions(-)

diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index b220c12aa82..bb1caa063fb 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -57,7 +57,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
   CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
   MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 
@@ -85,8 +85,8 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
 
   switch (_root_type) {
     case universe:
@@ -156,8 +156,8 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                 mark_and_push_closure, follow_stack_closure);
 }
@@ -213,7 +213,7 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
 
   oop obj = NULL;
   ObjArrayTask task;
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
index d56c4f777cd..db17a8a6b28 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -179,11 +179,11 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
 
 void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
   assert(obj != NULL, "can't follow the content of NULL object");
 
-  PSParallelCompact::follow_klass(cm, this);
+  cm->follow_klass(this);
 
   // Only mark the header and let the scan of the meta-data mark
   // everything else.
-  PSParallelCompact::MarkAndPushClosure cl(cm);
+  ParCompactionManager::MarkAndPushClosure cl(cm);
   InstanceKlass::oop_oop_iterate_oop_maps(obj, &cl);
 }
 
@@ -201,9 +201,9 @@ void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager*
     // the call to follow_class_loader is made when the class loader itself
     // is handled.
     if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
-      PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
+      cm->follow_class_loader(klass->class_loader_data());
     } else {
-      PSParallelCompact::follow_klass(cm, klass);
+      cm->follow_klass(klass);
     }
   } else {
     // If klass is NULL then this a mirror for a primitive type.
@@ -212,7 +212,7 @@ void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager*
     assert(java_lang_Class::is_primitive(obj), "Sanity check");
   }
 
-  PSParallelCompact::MarkAndPushClosure cl(cm);
+  ParCompactionManager::MarkAndPushClosure cl(cm);
   oop_oop_iterate_statics(obj, &cl);
 }
 
@@ -221,7 +221,7 @@ void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionMana
   ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
   if (loader_data != NULL) {
-    PSParallelCompact::follow_class_loader(cm, loader_data);
+    cm->follow_class_loader(loader_data);
   }
 }
 
@@ -253,7 +253,7 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
         gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
       }
     )
-    PSParallelCompact::mark_and_push(cm, referent_addr);
+    cm->mark_and_push(referent_addr);
   }
 }
 T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
@@ -269,7 +269,7 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
                                PTR_FORMAT, p2i(discovered_addr));
       }
     )
-    PSParallelCompact::mark_and_push(cm, discovered_addr);
+    cm->mark_and_push(discovered_addr);
   }
 } else {
 #ifdef ASSERT
@@ -283,7 +283,7 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
                p2i(obj)));
 #endif
 }
-  PSParallelCompact::mark_and_push(cm, next_addr);
+  cm->mark_and_push(next_addr);
   klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
 }
 
@@ -297,7 +297,7 @@ void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm)
 }
 
 void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
-  PSParallelCompact::follow_klass(cm, this);
+  cm->follow_klass(this);
 
   if (UseCompressedOops) {
     oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
index 4e5544da50d..9c281725a4c 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
@@ -170,10 +170,17 @@ private:
   bool should_copy();
 
   // Save for later processing.  Must not fail.
-  inline void push(oop obj) { _marking_stack.push(obj); }
+  inline void push(oop obj);
   inline void push_objarray(oop objarray, size_t index);
   inline void push_region(size_t index);
 
+  // Check mark and maybe push on marking stack.
+  template <typename T> inline void mark_and_push(T* p);
+
+  inline void follow_klass(Klass* klass);
+
+  void follow_class_loader(ClassLoaderData* klass);
+
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
 
@@ -200,6 +207,39 @@ private:
   void follow_contents(objArrayOop array, int index);
 
   void update_contents(oop obj);
 
+  class MarkAndPushClosure: public ExtendedOopClosure {
+   private:
+    ParCompactionManager* _compaction_manager;
+   public:
+    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+
+    template <typename T> void do_oop_nv(T* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
+
+    // This closure provides its own oop verification code.
+    debug_only(virtual bool should_verify_oops() { return false; })
+  };
+
+  class FollowStackClosure: public VoidClosure {
+   private:
+    ParCompactionManager* _compaction_manager;
+   public:
+    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_void();
+  };
+
+  // The one and only place to start following the classes.
+  // Should only be applied to the ClassLoaderData klasses list.
+  class FollowKlassClosure : public KlassClosure {
+   private:
+    MarkAndPushClosure* _mark_and_push_closure;
+   public:
+    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
+      _mark_and_push_closure(mark_and_push_closure) { }
+    void do_klass(Klass* klass);
+  };
 };
 
 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
index fb700318b9c..afcc8034d5a 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
@@ -32,6 +32,10 @@
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+inline void ParCompactionManager::push(oop obj) {
+  _marking_stack.push(obj);
+}
+
 void ParCompactionManager::push_objarray(oop obj, size_t index)
 {
   ObjArrayTask task(obj, index);
@@ -50,6 +54,47 @@ void ParCompactionManager::push_region(size_t index)
   region_stack()->push(index);
 }
 
+template <typename T>
+inline void ParCompactionManager::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
+
+    if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
+      push(obj);
+    }
+  }
+}
+
+template <typename T>
+inline void ParCompactionManager::MarkAndPushClosure::do_oop_nv(T* p) {
+  _compaction_manager->mark_and_push(p);
+}
+
+inline void ParCompactionManager::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void ParCompactionManager::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+inline void ParCompactionManager::follow_klass(Klass* klass) {
+  oop holder = klass->klass_holder();
+  mark_and_push(&holder);
+}
+
+inline void ParCompactionManager::FollowStackClosure::do_void() {
+  _compaction_manager->follow_marking_stacks();
+}
+
+inline void ParCompactionManager::FollowKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(_mark_and_push_closure);
+}
+
+inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
+  MarkAndPushClosure mark_and_push_closure(this);
+  FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+
+  cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
+}
+
 inline void ParCompactionManager::follow_contents(oop obj) {
   assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
   obj->pc_follow_contents(this);
@@ -69,7 +114,7 @@ inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCo
   // Push the non-NULL elements of the next stride on the marking stack.
   for (T* e = beg; e < end; e++) {
-    PSParallelCompact::mark_and_push(cm, e);
+    cm->mark_and_push(e);
   }
 
   if (end_index < len) {
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 212de5899cf..bba6c1fd340 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -822,11 +822,6 @@ bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap(
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
-void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
-
-void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(_mark_and_push_closure);
-}
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
   klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 }
@@ -2346,8 +2341,8 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
   ParallelTaskTerminator terminator(active_gc_threads, qset);
 
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
 
   // Need new claim bits before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2421,14 +2416,6 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   _gc_tracer.report_object_count_after_gc(is_alive_closure());
 }
 
-void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
-                                            ClassLoaderData* cld) {
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
-
-  cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
-}
-
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 6af90b299dc..8530bb07091 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -28,7 +28,6 @@
 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
-#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
 #include "gc_interface/collectedHeap.hpp"
@@ -933,14 +932,6 @@ class PSParallelCompact : AllStatic {
     virtual bool do_object_b(oop p);
   };
 
-  class FollowStackClosure: public VoidClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_void();
-  };
-
   class AdjustPointerClosure: public ExtendedOopClosure {
    public:
     template <typename T> void do_oop_nv(T* p);
@@ -956,11 +947,8 @@ class PSParallelCompact : AllStatic {
     void do_klass(Klass* klass);
   };
 
-  friend class FollowStackClosure;
   friend class AdjustPointerClosure;
   friend class AdjustKlassClosure;
-  friend class FollowKlassClosure;
-  friend class InstanceClassLoaderKlass;
   friend class RefProcTaskProxy;
 
  private:
@@ -1127,30 +1115,6 @@ class PSParallelCompact : AllStatic {
   static void reset_millis_since_last_gc();
 
  public:
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-
-    // This closure provides its own oop verification code.
-    debug_only(virtual bool should_verify_oops() { return false; })
-  };
-
-  // The one and only place to start following the classes.
-  // Should only be applied to the ClassLoaderData klasses list.
-  class FollowKlassClosure : public KlassClosure {
-   private:
-    MarkAndPushClosure* _mark_and_push_closure;
-   public:
-    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
-      _mark_and_push_closure(mark_and_push_closure) { }
-    void do_klass(Klass* klass);
-  };
 
   PSParallelCompact();
 
@@ -1182,16 +1146,9 @@ class PSParallelCompact : AllStatic {
   // Marking support
   static inline bool mark_obj(oop obj);
   static inline bool is_marked(oop obj);
-  // Check mark and maybe push on marking stack
-  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
-                                                      T* p);
 
+  template <class T> static inline void adjust_pointer(T* p);
 
-  static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
-
-  static void follow_class_loader(ParCompactionManager* cm,
-                                  ClassLoaderData* klass);
-
   // Compaction support.
   // Return true if p is in the range [beg_addr, end_addr).
   static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
index a817bd00051..5a18e1712a3 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
@@ -26,38 +26,11 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
 
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
 
-template <typename T>
-inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
-
-    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
-      cm->push(obj);
-    }
-  }
-}
-
-template <typename T>
-inline void PSParallelCompact::MarkAndPushClosure::do_oop_nv(T* p) {
-  mark_and_push(_compaction_manager, p);
-}
-
-inline void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
-inline void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
-
-inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
-  oop holder = klass->klass_holder();
-  mark_and_push(cm, &holder);
-}
-
 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);

From 689d9a58b14bd865969f6a20b4ef419a78ea37f5 Mon Sep 17 00:00:00 2001
From: Michail Chernov
Date: Wed, 22 Apr 2015 17:05:00 +0200
Subject: [PATCH 04/19] 8071462: Remove G1ParGCAllocator::alloc_buffer_waste

Removed G1ParGCAllocator::alloc_buffer_waste, added method to obtain waste

Reviewed-by: tschatzl, sjohanss
---
 .../vm/gc_implementation/g1/g1Allocator.cpp | 14 ++++++--
 .../vm/gc_implementation/g1/g1Allocator.hpp | 26 ++++----------
 .../g1/g1ParScanThreadState.cpp             |  5 +--
 .../g1/g1ParScanThreadState.hpp             |  8 +----
 .../parNew/parNewGeneration.cpp             | 12 ++-----
 .../vm/gc_implementation/shared/plab.cpp    | 36 ++++++++++++++++---
 .../vm/gc_implementation/shared/plab.hpp    | 31 +++++++++++-----
 7 files changed, 77 insertions(+), 55 deletions(-)

diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
index d4f2f190690..e8b3bbc3a05 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
@@ -119,7 +119,6 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
   size_t gclab_word_size = _g1h->desired_plab_sz(dest);
   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
     G1PLAB* alloc_buf = alloc_buffer(dest, context);
-    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
     alloc_buf->retire();
 
     HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
@@ -153,8 +152,19 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
   for (uint state = 0; state < InCSetState::Num; state++) {
     G1PLAB* const buf = _alloc_buffers[state];
     if (buf != NULL) {
-      add_to_alloc_buffer_waste(buf->words_remaining());
       buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
     }
   }
 }
+
+void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
+  wasted = 0;
+  undo_wasted = 0;
+  for (uint state = 0; state < InCSetState::Num; state++) {
+    G1PLAB * const buf = _alloc_buffers[state];
+    if (buf != NULL) {
+      wasted += buf->waste();
+      undo_wasted += buf->undo_waste();
+    }
+  }
+}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
index 78741e33a1e..2830a80ae91 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
@@ -188,12 +188,6 @@ protected:
   // architectures have a special compare against zero instructions.
   const uint _survivor_alignment_bytes;
 
-  size_t _alloc_buffer_waste;
-  size_t _undo_waste;
-
-  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
-
   virtual void retire_alloc_buffers() = 0;
   virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
 
@@ -213,15 +207,12 @@ protected:
 
 public:
   G1ParGCAllocator(G1CollectedHeap* g1h) :
-    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
-    _alloc_buffer_waste(0), _undo_waste(0) {
-  }
+    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
   virtual ~G1ParGCAllocator() { }
 
   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
 
-  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
-  size_t undo_waste() {return _undo_waste; }
+  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
 
   // Allocate word_sz words in dest, either directly into the regions or by
   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
@@ -253,14 +244,7 @@ public:
   }
 
   void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
-    if (alloc_buffer(dest, context)->contains(obj)) {
-      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
+    alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
   }
 };
 
@@ -280,7 +264,9 @@ public:
     return _alloc_buffers[dest.value()];
   }
 
-  virtual void retire_alloc_buffers() ;
+  virtual void retire_alloc_buffers();
+
+  virtual void waste(size_t& wasted, size_t& undo_wasted);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
index e7f6e18c4da..9cd40826f70 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
@@ -95,8 +95,9 @@ G1ParScanThreadState::print_termination_stats(int i,
   const double elapsed_ms = elapsed_time() * 1000.0;
   const double s_roots_ms = strong_roots_time() * 1000.0;
   const double term_ms    = term_time() * 1000.0;
-  const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
-  const size_t undo_waste = _g1_par_allocator->undo_waste();
+  size_t alloc_buffer_waste = 0;
+  size_t undo_waste = 0;
+  _g1_par_allocator->waste(alloc_buffer_waste, undo_waste);
   st->print_cr("%3d %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
index 0b69122e606..787257b919f 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,9 +54,6 @@ class G1ParScanThreadState : public StackObj {
   uint              _tenuring_threshold;
   G1ParScanClosure  _scanner;
 
-  size_t            _alloc_buffer_waste;
-  size_t            _undo_waste;
-
   OopsInHeapRegionClosure*  _evac_failure_cl;
 
   int  _hash_seed;
@@ -78,9 +75,6 @@ class G1ParScanThreadState : public StackObj {
 
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
-  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
-
   DirtyCardQueue& dirty_card_queue()       { return _dcq; }
   G1SATBCardTableModRefBS* ctbs()          { return _ct_bs; }
 
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 68b04be6b79..107c807238b 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -272,16 +272,8 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
   }
 }
 
-void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
-                                                size_t word_sz) {
-  // Is the alloc in the current alloc buffer?
-  if (to_space_alloc_buffer()->contains(obj)) {
-    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
-           "Should contain whole object.");
-    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-  }
+void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
+  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 }
 
 void ParScanThreadState::print_promotion_failure_size() {
diff --git a/hotspot/src/share/vm/gc_implementation/shared/plab.cpp b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp
index 3ecc205df31..93eb5e94f14 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/plab.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/shared/plab.hpp"
+#include "gc_interface/collectedHeap.hpp"
 #include "memory/threadLocalAllocBuffer.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -39,7 +40,7 @@ size_t PLAB::max_size() {
 
 PLAB::PLAB(size_t desired_plab_sz_) :
   _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
-  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
+  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
 {
   // ArrayOopDesc::header_size depends on command line initialization.
   AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
@@ -62,13 +63,15 @@ void PLAB::flush_and_retire_stats(PLABStats* stats) {
   // Now flush the statistics.
   stats->add_allocated(_allocated);
   stats->add_wasted(_wasted);
+  stats->add_undo_wasted(_undo_wasted);
   stats->add_unused(unused);
 
   // Since we have flushed the stats we need to clear the _allocated and _wasted
   // fields in case somebody retains an instance of this over GCs. Not doing so
   // will artifically inflate the values in the statistics.
-  _allocated = 0;
-  _wasted = 0;
+  _allocated   = 0;
+  _wasted      = 0;
+  _undo_wasted = 0;
 }
 
 void PLAB::retire() {
@@ -84,6 +87,28 @@ size_t PLAB::retire_internal() {
   return result;
 }
 
+void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
+  CollectedHeap::fill_with_object(obj, word_sz);
+  _undo_wasted += word_sz;
+}
+
+void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
+  assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
+  assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
+  _top = obj;
+}
+
+void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
+  // Is the alloc in the current alloc buffer?
+  if (contains(obj)) {
+    assert(contains(obj + word_sz - 1),
+           "should contain whole object");
+    undo_last_allocation(obj, word_sz);
+  } else {
+    add_undo_waste(obj, word_sz);
+  }
+}
+
 // Compute desired plab size and latch result for later
 // use. This should be called once at the end of parallel
 // scavenge; it clears the sensor accumulators.
@@ -98,8 +123,9 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
            err_msg("Inconsistency in PLAB stats: "
                    "_allocated: "SIZE_FORMAT", "
                    "_wasted: "SIZE_FORMAT", "
-                   "_unused: "SIZE_FORMAT,
-                   _allocated, _wasted, _unused));
+                   "_unused: "SIZE_FORMAT", "
+                   "_undo_wasted: "SIZE_FORMAT,
+                   _allocated, _wasted, _unused, _undo_wasted));
 
   _allocated = 1;
 }
diff --git a/hotspot/src/share/vm/gc_implementation/shared/plab.hpp b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp
index 324adfcd8a9..3660f16e610 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/plab.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp
@@ -45,6 +45,7 @@ protected:
   // In support of ergonomic sizing of PLAB's
   size_t    _allocated;     // in HeapWord units
   size_t    _wasted;        // in HeapWord units
+  size_t    _undo_wasted;
   char      tail[32];
   static size_t AlignmentReserve;
 
@@ -62,6 +63,12 @@ protected:
   // the amount of remaining space.
   size_t retire_internal();
 
+  void add_undo_waste(HeapWord* obj, size_t word_sz);
+
+  // Undo the last allocation in the buffer, which is required to be of the
+  // "obj" of the given "word_sz".
+  void undo_last_allocation(HeapWord* obj, size_t word_sz);
+
 public:
   // Initializes the buffer to be empty, but with the given "word_sz".
   // Must get initialized with "set_buf" for an allocation to succeed.
@@ -90,18 +97,17 @@ public:
   // Allocate the object aligned to "alignment_in_bytes".
   HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes);
 
-  // Undo the last allocation in the buffer, which is required to be of the
+  // Undo any allocation in the buffer, which is required to be of the
   // "obj" of the given "word_sz".
-  void undo_allocation(HeapWord* obj, size_t word_sz) {
-    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
-    assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
-    _top = obj;
-  }
+  void undo_allocation(HeapWord* obj, size_t word_sz);
 
   // The total (word) size of the buffer, including both allocated and
   // unallocated space.
   size_t word_sz() { return _word_sz; }
 
+  size_t waste() { return _wasted; }
+  size_t undo_waste() { return _undo_wasted; }
+
   // Should only be done if we are about to reset with a new buffer of the
   // given size.
   void set_word_size(size_t new_word_sz) {
@@ -146,20 +152,23 @@ public:
 class PLABStats VALUE_OBJ_CLASS_SPEC {
   size_t _allocated;      // Total allocated
   size_t _wasted;         // of which wasted (internal fragmentation)
+  size_t _undo_wasted;    // of which wasted on undo (is not used for calculation of PLAB size)
   size_t _unused;         // Unused in last buffer
   size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
   AdaptiveWeightedAverage _filter;  // Integrator with decay
 
   void reset() {
-    _allocated = 0;
-    _wasted    = 0;
-    _unused    = 0;
+    _allocated   = 0;
+    _wasted      = 0;
+    _undo_wasted = 0;
+    _unused      = 0;
   }
  public:
   PLABStats(size_t desired_plab_sz_, unsigned wt) :
     _allocated(0),
     _wasted(0),
+    _undo_wasted(0),
    _unused(0),
    _desired_plab_sz(desired_plab_sz_),
    _filter(wt)
@@ -192,6 +201,10 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
   void add_wasted(size_t v) {
     Atomic::add_ptr(v, &_wasted);
   }
+
+  void add_undo_wasted(size_t v) {
+    Atomic::add_ptr(v, &_undo_wasted);
+  }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP

From 4a6024895d2d483f8da00e06ccd8d4f42eda7942 Mon Sep 17 00:00:00 2001
From: Christian Tornqvist
Date: Wed, 22 Apr 2015 13:58:42 -0700
Subject: [PATCH 05/19] 8077529: [TESTBUG] Remove hotspot.internalvmtests from jprt config

Reviewed-by: mikael, sla
---
 hotspot/test/Makefile | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/hotspot/test/Makefile b/hotspot/test/Makefile
index 85128e1d976..93c4787d679 100644
--- a/hotspot/test/Makefile
+++ b/hotspot/test/Makefile
@@ -399,15 +399,6 @@ PHONY_LIST += hotspot_servertest servertest
 
 ################################################################
 
-# internalvmtests (run internal unit tests inside the VM)
-
-hotspot_internalvmtests internalvmtests: prep $(PRODUCT_HOME)
-	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -XX:+ExecuteInternalVMTests -version
-
-PHONY_LIST += hotspot_internalvmtests internalvmtests
-
-################################################################
-
 # Phony targets (e.g. these are not filenames)
 .PHONY: all clean prep $(PHONY_LIST)

From d50c630d1bae0ed789115b69dffa7c304b1621f0 Mon Sep 17 00:00:00 2001
From: Andrey Zakharov
Date: Thu, 23 Apr 2015 15:54:47 +0200
Subject: [PATCH 06/19] 8073669: gc/TestSoftReferencesBehaviorOnOOME.java times out in nightlies

Changed test scenario to more straight-forward one, added tricks to prevent
compiler optimizations and added checks when OOME didn't thrown as expected.
Reviewed-by: tschatzl, jwilhelm
---
 .../gc/TestSoftReferencesBehaviorOnOOME.java | 98 ++++++++++---------
 1 file changed, 50 insertions(+), 48 deletions(-)

diff --git a/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java b/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java
index 860f49db4e7..674d8b40272 100644
--- a/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java
+++ b/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java
@@ -28,29 +28,30 @@
 * @library /testlibrary
 * @modules java.base/sun.misc
 *          java.management
- * @ignore 8073669
 * @build TestSoftReferencesBehaviorOnOOME
 * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
 * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 128k 256k
- * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 2k 32k 10
+ * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 2k 32k
 */
 import com.oracle.java.testlibrary.Utils;
+import com.oracle.java.testlibrary.Asserts;
 import java.lang.ref.SoftReference;
 import java.util.LinkedList;
 import java.util.Random;
 
 public class TestSoftReferencesBehaviorOnOOME {
 
-    private static final Random rndGenerator = Utils.getRandomInstance();
-
+    /**
+     * Test generates a lot of soft references to objects with random payloads.
+     * Then it provokes OOME and checks that all SoftReferences has been gone
+     * @param args - [minSize] [maxSize] [freq]
+     *  where
+     *  - minSize - min size of random objects
+     *  - maxSize - max size of random objects
+     */
    public static void main(String[] args) {
-        int semiRefAllocFrequency = DEFAULT_FREQUENCY;
-        long minSize = DEFAULT_MIN_SIZE,
-                maxSize = DEFAULT_MAX_SIZE;
-
-        if ( args.length >= 3 ) {
-            semiRefAllocFrequency = Integer.parseInt(args[2]);
-        }
+        long minSize = DEFAULT_MIN_SIZE;
+        long maxSize = DEFAULT_MAX_SIZE;
 
        if ( args.length >= 2) {
            maxSize = getBytesCount(args[1]);
@@ -60,46 +61,49 @@ public class TestSoftReferencesBehaviorOnOOME {
            minSize = getBytesCount(args[0]);
        }
 
-        new TestSoftReferencesBehaviorOnOOME().softReferencesOom(minSize, maxSize, semiRefAllocFrequency);
+        new TestSoftReferencesBehaviorOnOOME().softReferencesOom(minSize, maxSize);
    }
 
    /**
    * Test that all SoftReferences has been cleared at time of OOM.
    */
-    void softReferencesOom(long minSize, long maxSize, int semiRefAllocFrequency) {
-        System.out.format( "minSize = %d, maxSize = %d, freq = %d%n", minSize, maxSize, semiRefAllocFrequency );
-        long counter = 0;
+    void softReferencesOom(long minSize, long maxSize) {
+        System.out.format( "minSize = %d, maxSize = %d%n", minSize, maxSize );
+
+        LinkedList arrSoftRefs = new LinkedList();
+        staticRef = arrSoftRefs;
+        LinkedList arrObjects = new LinkedList();
+        staticRef = arrObjects;
 
        long multiplier = maxSize - minSize;
-        LinkedList arrSoftRefs = new LinkedList();
-        LinkedList arrObjects = new LinkedList();
        long numberOfNotNulledObjects = 0;
-        long oomSoftArraySize = 0;
 
        try {
-            while (true) {
-                // Keep every Xth object to make sure we hit OOM pretty fast
-                if (counter % semiRefAllocFrequency != 0) {
-                    long allocationSize = ((int) (rndGenerator.nextDouble() * multiplier))
-                            + minSize;
-                    arrObjects.add(new byte[(int)allocationSize]);
-                } else {
-                    arrSoftRefs.add(new SoftReference(new Object()));
-                }
-                counter++;
-                if (counter == Long.MAX_VALUE) {
-                    counter = 0;
-                }
+            // Lets allocate as many as we can - taking size of all SoftRerefences
+            // by minimum. So it can provoke some GC but we surely will allocate enough.
+            long numSofts = (long) ((0.95 * Runtime.getRuntime().totalMemory()) / minSize);
+            System.out.println("num Soft: " + numSofts);
+
+            while (numSofts-- > 0) {
+                int allocationSize = ((int) (RND_GENERATOR.nextDouble() * multiplier))
+                        + (int)minSize;
+                arrSoftRefs.add(new SoftReference(new byte[allocationSize]));
            }
+
+            System.out.println("free: " + Runtime.getRuntime().freeMemory());
+
+            // provoke OOME.
+            while (true) {
+                arrObjects.add(new byte[(int) Runtime.getRuntime().totalMemory()]);
+            }
+
        } catch (OutOfMemoryError oome) {
+
            // Clear allocated ballast, so we don't get another OOM.
-
+            staticRef = null;
            arrObjects = null;
-
-            // Get the number of soft refs first, so we don't trigger
-            // another OOM.
-            oomSoftArraySize = arrSoftRefs.size();
+            long oomSoftArraySize = arrSoftRefs.size();
 
            for (SoftReference sr : arrSoftRefs) {
                Object o = sr.get();
@@ -111,15 +115,14 @@ public class TestSoftReferencesBehaviorOnOOME {
 
            // Make sure we clear all refs before we return failure
            arrSoftRefs = null;
-
-            if (numberOfNotNulledObjects > 0) {
-                throw new RuntimeException(numberOfNotNulledObjects + " out of "
-                        + oomSoftArraySize + " SoftReferences was not "
-                        + "null at time of OutOfMemoryError");
-            }
+            Asserts.assertFalse(numberOfNotNulledObjects > 0,
+                    "" + numberOfNotNulledObjects + " out of "
+                    + oomSoftArraySize + " SoftReferences was not "
+                    + "null at time of OutOfMemoryError"
+            );
        } finally {
-            arrSoftRefs = null;
-            arrObjects = null;
+            Asserts.assertTrue(arrObjects == null, "OOME hasn't been provoked");
+            Asserts.assertTrue(arrSoftRefs == null, "OOME hasn't been provoked");
        }
    }
 
@@ -128,9 +131,7 @@ public class TestSoftReferencesBehaviorOnOOME {
        long mod = 1;
 
        if (arg.trim().length() >= 2) {
-            mod = postfixes.indexOf(
-                    arg.trim().charAt(arg.length() - 1)
-            );
+            mod = postfixes.indexOf(arg.trim().charAt(arg.length() - 1));
 
            if (mod != -1) {
                mod = (long) Math.pow(1024, mod+1);
@@ -143,7 +144,8 @@ public class TestSoftReferencesBehaviorOnOOME {
        return Long.parseLong(arg) * mod;
    }
 
+    private static final Random RND_GENERATOR = Utils.getRandomInstance();
    private static final long DEFAULT_MIN_SIZE = 512;
    private static final long DEFAULT_MAX_SIZE = 1024;
-    private static final int DEFAULT_FREQUENCY = 4;
+    private static Object staticRef; // to prevent compile optimisations
 }

From 878cf5e6412d61256429caf390505d3f8d10c2b0 Mon Sep 17 00:00:00 2001
From: Kim Barrett
Date: Thu, 23 Apr 2015 11:26:32 -0400
Subject: [PATCH 07/19] 8031401: Remove unused code in the reference processor

Assume pending_list uses discovered field and remove resulting dead code.
Reviewed-by: brutisso, pliden
---
 .../parallelScavenge/psCompactionManager.cpp  |  37 ++-----
 .../parallelScavenge/psPromotionManager.cpp   |  34 ++----
 .../vm/gc_implementation/shared/markSweep.cpp |  37 ++-----
 .../share/vm/memory/referenceProcessor.cpp    | 104 ++++--------------
 .../share/vm/memory/referenceProcessor.hpp    |  13 ---
 .../share/vm/oops/instanceRefKlass.inline.hpp |  31 ++----
 hotspot/src/share/vm/runtime/java.cpp         |   8 +-
 hotspot/src/share/vm/runtime/java.hpp         |  18 +--
 8 files changed, 78 insertions(+), 204 deletions(-)

diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
index db17a8a6b28..4528b1c4047 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -257,31 +257,18 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
     }
   }
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
-    // Treat discovered as normal oop, if ref is not "active",
-    // i.e. if next is non-NULL.
-    T next_oop = oopDesc::load_heap_oop(next_addr);
-    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("   Process discovered as normal "
-                                 PTR_FORMAT, p2i(discovered_addr));
-        }
-      )
-      cm->mark_and_push(discovered_addr);
-    }
-  } else {
-#ifdef ASSERT
-    // In the case of older JDKs which do not use the discovered
-    // field for the pending list, an inactive ref (next != NULL)
-    // must always have a NULL discovered field.
-    T next = oopDesc::load_heap_oop(next_addr);
-    oop discovered = java_lang_ref_Reference::discovered(obj);
-    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
-           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
-                   p2i(obj)));
-#endif
+  // Treat discovered as normal oop, if ref is not "active",
+  // i.e. if next is non-NULL.
+  T next_oop = oopDesc::load_heap_oop(next_addr);
+  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+    debug_only(
+      if(TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("   Process discovered as normal "
+                               PTR_FORMAT, p2i(discovered_addr));
+      }
+    )
+    cm->mark_and_push(discovered_addr);
   }
   cm->mark_and_push(next_addr);
   klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
index 29076516b47..acd2475b7ff 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -365,33 +365,19 @@ static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, P
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
-    T next_oop = oopDesc::load_heap_oop(next_addr);
-    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("   Process discovered as normal "
-                                 PTR_FORMAT, p2i(discovered_addr));
-        }
-      )
-      if (PSScavenge::should_scavenge(discovered_addr)) {
-        pm->claim_or_forward_depth(discovered_addr);
+  T next_oop = oopDesc::load_heap_oop(next_addr);
+  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+    debug_only(
+      if(TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("   Process discovered as normal "
+                               PTR_FORMAT, p2i(discovered_addr));
      }
+    )
+    if (PSScavenge::should_scavenge(discovered_addr)) {
+      pm->claim_or_forward_depth(discovered_addr);
    }
-  } else {
-#ifdef ASSERT
-    // In the case of older JDKs which do not use the discovered
-    // field for the pending list, an inactive ref (next != NULL)
-    // must always have a NULL discovered field.
-    oop next = oopDesc::load_decode_heap_oop(next_addr);
-    oop discovered = java_lang_ref_Reference::discovered(obj);
-    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
-           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
-                   p2i(obj)));
-#endif
  }
-
  // Treat next as normal oop; next is a link in the reference queue.
  if (PSScavenge::should_scavenge(next_addr)) {
    pm->claim_or_forward_depth(next_addr);
diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
index e459341ba03..6808460d362 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -145,31 +145,18 @@ static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj)
    }
  }
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
-    // Treat discovered as normal oop, if ref is not "active",
-    // i.e. if next is non-NULL.
-    T next_oop = oopDesc::load_heap_oop(next_addr);
-    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("   Process discovered as normal "
-                                 PTR_FORMAT, p2i(discovered_addr));
-        }
-      )
-      MarkSweep::mark_and_push(discovered_addr);
-    }
-  } else {
-#ifdef ASSERT
-    // In the case of older JDKs which do not use the discovered
-    // field for the pending list, an inactive ref (next != NULL)
-    // must always have a NULL discovered field.
-    oop next = oopDesc::load_decode_heap_oop(next_addr);
-    oop discovered = java_lang_ref_Reference::discovered(obj);
-    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
-           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
-                   p2i(obj)));
-#endif
+  // Treat discovered as normal oop, if ref is not "active",
+  // i.e. if next is non-NULL.
+  T next_oop = oopDesc::load_heap_oop(next_addr);
+  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+    debug_only(
+      if(TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("   Process discovered as normal "
+                               PTR_FORMAT, p2i(discovered_addr));
+      }
+    )
+    MarkSweep::mark_and_push(discovered_addr);
  }
  // treat next as normal oop.  next is a link in the reference queue.
  debug_only(
diff --git a/hotspot/src/share/vm/memory/referenceProcessor.cpp b/hotspot/src/share/vm/memory/referenceProcessor.cpp
index 8943a5e675f..64ab742b9c4 100644
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp
@@ -37,7 +37,6 @@
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
-bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
 jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;
 
 void referenceProcessor_init() {
@@ -63,7 +62,6 @@ void ReferenceProcessor::init_statics() {
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecognized RefDiscoveryPolicy");
-  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
 void ReferenceProcessor::enable_discovery(bool check_no_refs) {
@@ -353,10 +351,6 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
   // all linked Reference objects. Note that it is important to not dirty any
   // cards during reference processing since this will cause card table
   // verification to fail for G1.
-  //
-  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
-  // the "next" field is used to chain the pending list, not the discovered
-  // field.
   if (TraceReferenceGC && PrintGCDetails) {
     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                            INTPTR_FORMAT, p2i(refs_list.head()));
@@ -364,64 +358,30 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
 
   oop obj = NULL;
   oop next_d = refs_list.head();
-  if (pending_list_uses_discovered_field()) { // New behavior
-    // Walk down the list, self-looping the next field
-    // so that the References are not considered active.
+  // Walk down the list, self-looping the next field
+  // so that the References are not considered active.
+  while (obj != next_d) {
+    obj = next_d;
+    assert(obj->is_instanceRef(), "should be reference object");
+    next_d = java_lang_ref_Reference::discovered(obj);
+    if (TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
+                             p2i(obj), p2i(next_d));
+    }
-  } else { // Old behavior
-    // Walk down the list, copying the discovered field into
-    // the next field and clearing the discovered field.
-    while (obj != next_d) {
-      obj = next_d;
-      assert(obj->is_instanceRef(), "should be reference object");
-      next_d = java_lang_ref_Reference::discovered(obj);
-      if (TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                               p2i(obj), p2i(next_d));
-      }
-      assert(java_lang_ref_Reference::next(obj) == NULL,
-             "The reference should not be enqueued");
-      if (next_d == obj) {  // obj is last
-        // Swap refs_list into pending_list_addr and
-        // set obj's next to what we read from pending_list_addr.
-        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-        // Need oop_check on pending_list_addr above;
-        // see special oop-check code at the end of
-        // enqueue_discovered_reflists() further below.
-        if (old == NULL) {
-          // obj should be made to point to itself, since
-          // pending list was empty.
-          java_lang_ref_Reference::set_next(obj, obj);
-        } else {
-          java_lang_ref_Reference::set_next(obj, old);
-        }
-      } else {
-        java_lang_ref_Reference::set_next(obj, next_d);
-      }
-      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
+    assert(java_lang_ref_Reference::next(obj) == NULL,
+           "Reference not active; should not be discovered");
+    // Self-loop next, so as to make Ref not active.
+    java_lang_ref_Reference::set_next_raw(obj, obj);
+    if (next_d != obj) {
+      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
+    } else {
+      // This is the last object.
+      // Swap refs_list into pending_list_addr and
+      // set obj's discovered to what we read from pending_list_addr.
+      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
+      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
+      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}
@@ -515,22 +475,6 @@ void DiscoveredListIterator::remove() {
  _refs_list.dec_length(1);
}
 
-// Make the Reference object active again.
-void DiscoveredListIterator::make_active() {
-  // The pre barrier for G1 is probably just needed for the old
-  // reference processing behavior. Should we guard this with
-  // ReferenceProcessor::pending_list_uses_discovered_field() ?
-  if (UseG1GC) {
-    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
-    if (UseCompressedOops) {
-      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
-    } else {
-      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
-    }
-  }
-  java_lang_ref_Reference::set_next_raw(_ref, NULL);
-}
-
 void DiscoveredListIterator::clear_referent() {
   oop_store_raw(_referent_addr, NULL);
 }
@@ -567,8 +511,6 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
       }
       // Remove Reference object from list
       iter.remove();
-      // Make the Reference object active again
-      iter.make_active();
       // keep the referent around
       iter.make_referent_alive();
       iter.move_to_next();
diff --git a/hotspot/src/share/vm/memory/referenceProcessor.hpp b/hotspot/src/share/vm/memory/referenceProcessor.hpp
index 6dd50e5a936..0f7b6f77673 100644
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp
@@ -161,9 +161,6 @@ public:
   // Remove the current reference from the list
   void remove();
 
-  // Make the Reference object active again.
-  void make_active();
-
   // Make the referent alive.
   inline void make_referent_alive() {
     if (UseCompressedOops) {
@@ -200,9 +197,6 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
   size_t total_count(DiscoveredList lists[]);
 
  protected:
-  // Compatibility with pre-4965777 JDK's
-  static bool _pending_list_uses_discovered_field;
-
   // The SoftReference master timestamp clock
   static jlong _soft_ref_timestamp_clock;
 
@@ -421,13 +415,6 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
   bool discovery_is_atomic() const { return _discovery_is_atomic; }
   void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
 
-  // whether the JDK in which we are embedded is a pre-4965777 JDK,
-  // and thus whether or not it uses the discovered field to chain
-  // the entries in the pending list.
-  static bool pending_list_uses_discovered_field() {
-    return _pending_list_uses_discovered_field;
-  }
-
   // whether discovery is done by multiple threads same-old-timeously
   bool discovery_is_mt() const { return _discovery_is_mt; }
   void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
diff --git a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp
index 95d1b385f36..56cbf5e0f50 100644
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp
@@ -55,30 +55,17 @@ void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopCl
     }
   }
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
-    T next_oop = oopDesc::load_heap_oop(next_addr);
-    // Treat discovered as normal oop, if ref is not "active" (next non-NULL)
-    if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
-      // i.e. ref is not "active"
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("   Process discovered as normal "
-                                 PTR_FORMAT, p2i(disc_addr));
-        }
-      )
-      Devirtualizer<nv>::do_oop(closure, disc_addr);
-    }
-  } else {
-    // In the case of older JDKs which do not use the discovered field for
-    // the pending list, an inactive ref (next != NULL) must always have a
-    // NULL discovered field.
+  T next_oop = oopDesc::load_heap_oop(next_addr);
+  // Treat discovered as normal oop, if ref is not "active" (next non-NULL)
+  if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
+    // i.e. ref is not "active"
     debug_only(
-      T next_oop = oopDesc::load_heap_oop(next_addr);
-      T disc_oop = oopDesc::load_heap_oop(disc_addr);
-      assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop),
-             err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL"
-                     "discovered field", p2i(obj)));
+      if(TraceReferenceGC && PrintGCDetails) {
+        gclog_or_tty->print_cr("   Process discovered as normal "
+                               PTR_FORMAT, p2i(disc_addr));
+      }
     )
+    Devirtualizer<nv>::do_oop(closure, disc_addr);
   }
   // treat next as normal oop
   if (contains(next_addr)) {
diff --git a/hotspot/src/share/vm/runtime/java.cpp b/hotspot/src/share/vm/runtime/java.cpp
index 0263a501d12..896c676407d 100644
--- a/hotspot/src/share/vm/runtime/java.cpp
+++ b/hotspot/src/share/vm/runtime/java.cpp
@@ -651,11 +651,15 @@ void JDK_Version::initialize() {
       minor = micro;
       micro = 0;
     }
+    // Incompatible with pre-4243978 JDK.
+    if (info.pending_list_uses_discovered_field == 0) {
+      vm_exit_during_initialization(
+        "Incompatible JDK is not using Reference.discovered field for pending list");
+    }
     _current = JDK_Version(major, minor, micro, info.update_version,
                            info.special_update_version, build,
                            info.thread_park_blocker == 1,
-                           info.post_vm_init_hook_enabled == 1,
-                           info.pending_list_uses_discovered_field == 1);
+                           info.post_vm_init_hook_enabled == 1);
   }
 }
 
diff --git a/hotspot/src/share/vm/runtime/java.hpp b/hotspot/src/share/vm/runtime/java.hpp
index 716a3bf40d4..703f5060cbf 100644
--- a/hotspot/src/share/vm/runtime/java.hpp
+++ b/hotspot/src/share/vm/runtime/java.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -91,7 +91,6 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC { bool _partially_initialized; bool _thread_park_blocker; - bool _pending_list_uses_discovered_field; bool _post_vm_init_hook_enabled; bool is_valid() const { @@ -114,18 +113,17 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC { JDK_Version() : _major(0), _minor(0), _micro(0), _update(0), _special(0), _build(0), _partially_initialized(false), - _thread_park_blocker(false), _post_vm_init_hook_enabled(false), - _pending_list_uses_discovered_field(false) {} + _thread_park_blocker(false), _post_vm_init_hook_enabled(false) + {} JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0, uint8_t update = 0, uint8_t special = 0, uint8_t build = 0, - bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false, - bool pending_list_uses_discovered_field = false) : + bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) : _major(major), _minor(minor), _micro(micro), _update(update), _special(special), _build(build), _partially_initialized(false), _thread_park_blocker(thread_park_blocker), - _post_vm_init_hook_enabled(post_vm_init_hook_enabled), - _pending_list_uses_discovered_field(pending_list_uses_discovered_field) {} + _post_vm_init_hook_enabled(post_vm_init_hook_enabled) + {} // Returns the current running JDK version static JDK_Version current() { return _current; } @@ -152,10 +150,6 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC { bool post_vm_init_hook_enabled() const { return _post_vm_init_hook_enabled; } - // For compatibility wrt pre-4965777 JDK's - bool pending_list_uses_discovered_field() const { - return _pending_list_uses_discovered_field; - } // Performs a full ordering comparison using all fields (update, build, etc.) int compare(const JDK_Version& other) const; From 6d5aa2af43770be1429d1e056f7a1c8a6e995ca2 Mon Sep 17 00:00:00 2001 From: Bengt Rutisson Date: Mon, 27 Apr 2015 09:08:07 +0200 Subject: [PATCH 08/19] 8078613: HAS_BEEN_MOVED has been moved Reviewed-by: stefank, sjohanss --- .../parallelScavenge/psCompactionManager.hpp | 5 ----- .../parallelScavenge/psPromotionManager.hpp | 4 ---- 2 files changed, 9 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp index 9c281725a4c..305eb18672c 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp @@ -29,11 +29,6 @@ #include "utilities/stack.hpp" #include "utilities/taskqueue.hpp" -// Move to some global location -#define HAS_BEEN_MOVED 0x1501d01d -// End move to some global location - - class MutableSpace; class PSOldGen; class ParCompactionManager; diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp index d98ed220efd..9b39ee32cdd 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp @@ -45,10 +45,6 @@ // FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate! 
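[Editor's note: patch 08 below removes a sentinel that was #define'd, with the same value, in two unrelated headers. C++ tolerates redefining an object-like macro only when the replacement lists are identical token-for-token, so duplicated copies must never drift apart. One hypothetical way to give such a value a single home; the header name and the typed-constant style are illustrative, not what the patch itself does:]

    // taskqueue_sentinels.hpp (hypothetical header)
    #ifndef TASKQUEUE_SENTINELS_HPP
    #define TASKQUEUE_SENTINELS_HPP

    // One definition instead of two #define copies that could diverge.
    const unsigned int HAS_BEEN_MOVED = 0x1501d01d;

    #endif // TASKQUEUE_SENTINELS_HPP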
// -// Move to some global location -#define HAS_BEEN_MOVED 0x1501d01d -// End move to some global location - class MutableSpace; class PSOldGen; class ParCompactionManager; From f7ea0b72ebe63bd66b54787ace82ee71df943ecb Mon Sep 17 00:00:00 2001 From: Stefan Karlsson Date: Mon, 27 Apr 2015 09:51:06 +0200 Subject: [PATCH 09/19] 8076177: Remove usage of stack.inline.hpp functions from taskqueue.hpp Reviewed-by: brutisso, goetz --- .../cmsOopClosures.inline.hpp | 1 + .../concurrentMarkSweepGeneration.cpp | 1 + .../gc_implementation/g1/concurrentMark.cpp | 5 + .../gc_implementation/g1/concurrentMark.hpp | 4 +- .../g1/concurrentMark.inline.hpp | 1 + .../gc_implementation/g1/g1CollectedHeap.cpp | 1 + .../g1/g1ParScanThreadState.cpp | 2 +- .../g1/g1ParScanThreadState.hpp | 5 +- .../g1/g1ParScanThreadState.inline.hpp | 5 + .../parNew/parNewGeneration.cpp | 1 + .../parallelScavenge/psCompactionManager.cpp | 2 +- .../parallelScavenge/psCompactionManager.hpp | 14 +- .../psCompactionManager.inline.hpp | 13 + .../parallelScavenge/psPromotionManager.cpp | 2 +- .../parallelScavenge/psPromotionManager.hpp | 8 +- .../psPromotionManager.inline.hpp | 12 +- .../parallelScavenge/psTasks.cpp | 3 +- hotspot/src/share/vm/utilities/taskqueue.hpp | 243 +-------------- .../share/vm/utilities/taskqueue.inline.hpp | 279 ++++++++++++++++++ 19 files changed, 331 insertions(+), 271 deletions(-) create mode 100644 hotspot/src/share/vm/utilities/taskqueue.inline.hpp diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp index 2980f0d272f..bc8a6107ccf 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp @@ -28,6 +28,7 @@ #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" #include "oops/oop.inline.hpp" +#include "utilities/taskqueue.inline.hpp" // Trim our work_queue so its length is below max at return inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) { diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index 6729f0b23c2..18a777665a4 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -66,6 +66,7 @@ #include "services/memoryService.hpp" #include "services/runtimeService.hpp" #include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" // statics CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index efae52e60b2..4bc2e658e74 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -54,6 +54,7 @@ #include "runtime/atomic.inline.hpp" #include "runtime/prefetch.inline.hpp" #include "services/memTracker.hpp" +#include "utilities/taskqueue.inline.hpp" // Concurrent marking bit map wrapper @@ -3758,6 +3759,10 @@ void CMTask::print_stats() { #endif // _MARKING_STATS_ } +bool 
ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { + return _task_queues->steal(worker_id, hash_seed, obj); +} + /***************************************************************************** The do_marking_step(time_target_ms, ...) method is the building diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp index 3702273c959..99d40b2c4cc 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -671,9 +671,7 @@ public: } // Attempts to steal an object from the task queues of other tasks - bool try_stealing(uint worker_id, int* hash_seed, oop& obj) { - return _task_queues->steal(worker_id, hash_seed, obj); - } + bool try_stealing(uint worker_id, int* hash_seed, oop& obj); ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp index a96c2dc584a..3d00aec250d 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp @@ -27,6 +27,7 @@ #include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "utilities/taskqueue.inline.hpp" // Utility routine to set an exclusive range of cards on the given // card liveness bitmap diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 68e4815d732..3f8a6ce2002 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -66,6 +66,7 @@ #include "runtime/vmThread.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp index e7f6e18c4da..263aee71b60 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp @@ -29,7 +29,7 @@ #include "gc_implementation/g1/g1StringDedup.hpp" #include "oops/oop.inline.hpp" #include "runtime/prefetch.inline.hpp" -#include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp) : _g1h(g1h), diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp index 0b69122e606..8f11372bad5 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp @@ -106,10 +106,7 @@ class G1ParScanThreadState : public StackObj { bool verify_task(StarTask ref) const; #endif // ASSERT - template void push_on_queue(T* ref) { - assert(verify_ref(ref), "sanity"); - _refs->push(ref); - } + template void push_on_queue(T* ref); template void update_rs(HeapRegion* from, T* p, uint tid) { // If the new value of the field points to the same region or diff --git 
a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp index 81c79806a1a..c0ff1e5b426 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp @@ -59,6 +59,11 @@ template void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from update_rs(from, p, queue_num()); } +template inline void G1ParScanThreadState::push_on_queue(T* ref) { + assert(verify_ref(ref), "sanity"); + _refs->push(ref); +} + inline void G1ParScanThreadState::do_oop_partial_array(oop* p) { assert(has_partial_array_mask(p), "invariant"); oop from_obj = clear_partial_array_mask(p); diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp index 68b04be6b79..a01e127cb36 100644 --- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @@ -54,6 +54,7 @@ #include "utilities/copy.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" #include "utilities/workgroup.hpp" #ifdef _MSC_VER diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp index 4528b1c4047..2351e4d9444 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp @@ -37,7 +37,7 @@ #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" -#include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" PSOldGen* ParCompactionManager::_old_gen = NULL; ParCompactionManager** ParCompactionManager::_manager_array = NULL; diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp index 305eb18672c..45cd344571b 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp @@ -179,17 +179,9 @@ private: // Access function for compaction managers static ParCompactionManager* gc_thread_compaction_manager(int index); - static bool steal(int queue_num, int* seed, oop& t) { - return stack_array()->steal(queue_num, seed, t); - } - - static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) { - return _objarray_queues->steal(queue_num, seed, t); - } - - static bool steal(int queue_num, int* seed, size_t& region) { - return region_array()->steal(queue_num, seed, region); - } + static bool steal(int queue_num, int* seed, oop& t); + static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t); + static bool steal(int queue_num, int* seed, size_t& region); // Process tasks remaining on any marking stack void follow_marking_stacks(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp index afcc8034d5a..f1f4a570e11 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp +++ 
b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp @@ -31,6 +31,19 @@ #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/taskqueue.inline.hpp" + +inline bool ParCompactionManager::steal(int queue_num, int* seed, oop& t) { + return stack_array()->steal(queue_num, seed, t); +} + +inline bool ParCompactionManager::steal_objarray(int queue_num, int* seed, ObjArrayTask& t) { + return _objarray_queues->steal(queue_num, seed, t); +} + +inline bool ParCompactionManager::steal(int queue_num, int* seed, size_t& region) { + return region_array()->steal(queue_num, seed, region); +} inline void ParCompactionManager::push(oop obj) { _marking_stack.push(obj); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp index acd2475b7ff..0d15eaadaa4 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp @@ -36,7 +36,7 @@ #include "oops/instanceMirrorKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" -#include "utilities/stack.inline.hpp" +#include "utilities/taskqueue.inline.hpp" PaddedEnd* PSPromotionManager::_manager_array = NULL; OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp index 9b39ee32cdd..1612d9a85bb 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp @@ -139,9 +139,7 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC { int start, int end); void process_array_chunk(oop old); - template void push_depth(T* p) { - claimed_stack_depth()->push(p); - } + template void push_depth(T* p); inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size, uint age, bool tenured, @@ -159,9 +157,7 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC { static PSPromotionManager* gc_thread_promotion_manager(int index); static PSPromotionManager* vm_thread_promotion_manager(); - static bool steal_depth(int queue_num, int* seed, StarTask& t) { - return stack_array_depth()->steal(queue_num, seed, t); - } + static bool steal_depth(int queue_num, int* seed, StarTask& t); PSPromotionManager(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp index 301136fc986..0354a0bb961 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp @@ -31,6 +31,7 @@ #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "oops/oop.inline.hpp" +#include "utilities/taskqueue.inline.hpp" inline PSPromotionManager* PSPromotionManager::manager_array(int index) { assert(_manager_array != NULL, "access of NULL manager_array"); @@ -38,6 +39,11 @@ inline PSPromotionManager* PSPromotionManager::manager_array(int index) { return &_manager_array[index]; } +template +inline void 
PSPromotionManager::push_depth(T* p) { + claimed_stack_depth()->push(p); +} + template inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) { if (p != NULL) { // XXX: error if p != NULL here @@ -99,7 +105,7 @@ inline void PSPromotionManager::push_contents(oop obj) { // performance. // template -oop PSPromotionManager::copy_to_survivor_space(oop o) { +inline oop PSPromotionManager::copy_to_survivor_space(oop o) { assert(should_scavenge(&o), "Sanity"); oop new_obj = NULL; @@ -317,6 +323,10 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) { } } +inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) { + return stack_array_depth()->steal(queue_num, seed, t); +} + #if TASKQUEUE_STATS void PSPromotionManager::record_steal(StarTask& p) { if (is_oop_masked(p)) { diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp index c9e7ee7171a..d9ec123368c 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp @@ -39,8 +39,7 @@ #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" -#include "utilities/stack.inline.hpp" -#include "utilities/taskqueue.hpp" +#include "utilities/taskqueue.inline.hpp" // // ScavengeRootsTask diff --git a/hotspot/src/share/vm/utilities/taskqueue.hpp b/hotspot/src/share/vm/utilities/taskqueue.hpp index da30b2c8834..3f45b49a840 100644 --- a/hotspot/src/share/vm/utilities/taskqueue.hpp +++ b/hotspot/src/share/vm/utilities/taskqueue.hpp @@ -26,9 +26,6 @@ #define SHARE_VM_UTILITIES_TASKQUEUE_HPP #include "memory/allocation.hpp" -#include "memory/allocation.inline.hpp" -#include "runtime/mutex.hpp" -#include "runtime/orderAccess.inline.hpp" #include "utilities/stack.hpp" // Simple TaskQueue stats that are collected by default in debug builds. @@ -134,11 +131,7 @@ protected: if (_fields._top == 0) ++_fields._tag; } - Age cmpxchg(const Age new_age, const Age old_age) volatile { - return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data, - (volatile intptr_t *)&_data, - (intptr_t)old_age._data); - } + Age cmpxchg(const Age new_age, const Age old_age) volatile; bool operator ==(const Age& other) const { return _data == other._data; } @@ -315,121 +308,6 @@ GenericTaskQueue::GenericTaskQueue() { assert(sizeof(Age) == sizeof(size_t), "Depends on this."); } -template -void GenericTaskQueue::initialize() { - _elems = _array_allocator.allocate(N); -} - -template -void GenericTaskQueue::oops_do(OopClosure* f) { - // tty->print_cr("START OopTaskQueue::oops_do"); - uint iters = size(); - uint index = _bottom; - for (uint i = 0; i < iters; ++i) { - index = decrement_index(index); - // tty->print_cr(" doing entry %d," INTPTR_T " -> " INTPTR_T, - // index, &_elems[index], _elems[index]); - E* t = (E*)&_elems[index]; // cast away volatility - oop* p = (oop*)t; - assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t))); - f->do_oop(p); - } - // tty->print_cr("END OopTaskQueue::oops_do"); -} - -template -bool GenericTaskQueue::push_slow(E t, uint dirty_n_elems) { - if (dirty_n_elems == N - 1) { - // Actually means 0, so do the push. - uint localBot = _bottom; - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. 
We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void)const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); - TASKQUEUE_STATS_ONLY(stats.record_push()); - return true; - } - return false; -} - -// pop_local_slow() is done by the owning thread and is trying to -// get the last task in the queue. It will compete with pop_global() -// that will be used by other threads. The tag age is incremented -// whenever the queue goes empty which it will do here if this thread -// gets the last task or in pop_global() if the queue wraps (top == 0 -// and pop_global() succeeds, see pop_global()). -template -bool GenericTaskQueue::pop_local_slow(uint localBot, Age oldAge) { - // This queue was observed to contain exactly one element; either this - // thread will claim it, or a competing "pop_global". In either case, - // the queue will be logically empty afterwards. Create a new Age value - // that represents the empty queue for the given value of "_bottom". (We - // must also increment "tag" because of the case where "bottom == 1", - // "top == 0". A pop_global could read the queue element in that case, - // then have the owner thread do a pop followed by another push. Without - // the incrementing of "tag", the pop_global's CAS could succeed, - // allowing it to believe it has claimed the stale element.) - Age newAge((idx_t)localBot, oldAge.tag() + 1); - // Perhaps a competing pop_global has already incremented "top", in which - // case it wins the element. - if (localBot == oldAge.top()) { - // No competing pop_global has yet incremented "top"; we'll try to - // install new_age, thus claiming the element. - Age tempAge = _age.cmpxchg(newAge, oldAge); - if (tempAge == oldAge) { - // We win. - assert(dirty_size(localBot, _age.top()) != N - 1, "sanity"); - TASKQUEUE_STATS_ONLY(stats.record_pop_slow()); - return true; - } - } - // We lose; a completing pop_global gets the element. But the queue is empty - // and top is greater than bottom. Fix this representation of the empty queue - // to become the canonical one. - _age.set(newAge); - assert(dirty_size(localBot, _age.top()) != N - 1, "sanity"); - return false; -} - -template -bool GenericTaskQueue::pop_global(volatile E& t) { - Age oldAge = _age.get(); - // Architectures with weak memory model require a barrier here - // to guarantee that bottom is not older than age, - // which is crucial for the correctness of the algorithm. -#if !(defined SPARC || defined IA32 || defined AMD64) - OrderAccess::fence(); -#endif - uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom); - uint n_elems = size(localBot, oldAge.top()); - if (n_elems == 0) { - return false; - } - - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void) const_cast(t = _elems[oldAge.top()]); - Age newAge(oldAge); - newAge.increment(); - Age resAge = _age.cmpxchg(newAge, oldAge); - - // Note that using "_bottom" here might fail, since a pop_local might - // have decremented it. 
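[Editor's note: the push/pop_local/pop_global bodies deleted here, and re-added in taskqueue.inline.hpp further down, implement a Chase-Lev-style work-stealing deque. The toy below models only the (bottom, age) protocol the comments describe, with the tag acting as the ABA guard; names are invented, atomics are left at the default sequentially consistent ordering, and a power-of-two capacity with a single owner thread is assumed. It is a teaching sketch, not the HotSpot code.]

    #include <atomic>
    #include <cstdint>

    // 'age' packs (tag << 32) | top. The tag is bumped every time the
    // queue goes empty, so a thief still holding a stale (top, tag) pair
    // can never win the CAS and claim a recycled slot.
    template <typename E, uint32_t N>   // N: capacity, power of two
    class ToyTaskQueue {
      std::atomic<uint64_t> _age{0};
      std::atomic<uint32_t> _bottom{0};
      E _elems[N];

      static uint32_t top_of(uint64_t a) { return static_cast<uint32_t>(a); }
      static uint32_t tag_of(uint64_t a) { return static_cast<uint32_t>(a >> 32); }
      static uint64_t make_age(uint32_t top, uint32_t tag) {
        return (static_cast<uint64_t>(tag) << 32) | top;
      }

    public:
      bool push(E t) {                      // owner thread only
        uint32_t b = _bottom.load();
        uint64_t a = _age.load();
        if (b - top_of(a) >= N - 1) return false;   // keep one slot free
        _elems[b % N] = t;
        _bottom.store(b + 1);               // publish the element
        return true;
      }

      bool pop_global(E& t) {               // any stealing thread
        uint64_t a = _age.load();
        uint32_t b = _bottom.load();
        if (static_cast<int32_t>(b - top_of(a)) <= 0) return false;  // empty
        t = _elems[top_of(a) % N];
        uint64_t na = make_age(top_of(a) + 1, tag_of(a));
        return _age.compare_exchange_strong(a, na);  // lose: someone beat us
      }

      bool pop_local(E& t) {                // owner thread only
        uint32_t b = _bottom.load();
        uint64_t a = _age.load();
        if (b == top_of(a)) return false;   // empty
        _bottom.store(--b);                 // claim the slot before re-reading age
        a = _age.load();
        t = _elems[b % N];
        if (static_cast<int32_t>(b - top_of(a)) > 0) return true;  // no contention
        // Exactly one element was present: race any concurrent pop_global
        // for it, and leave the canonical empty representation behind
        // (top == bottom, tag bumped).
        uint64_t na = make_age(b, tag_of(a) + 1);
        if (b == top_of(a) && _age.compare_exchange_strong(a, na)) return true;
        _age.store(na);                     // lost: canonicalize the empty queue
        return false;
      }
    };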
- assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity"); - return resAge == oldAge; -} - -template -GenericTaskQueue::~GenericTaskQueue() { - FREE_C_HEAP_ARRAY(E, _elems); -} - // OverflowTaskQueue is a TaskQueue that also includes an overflow stack for // elements that do not fit in the TaskQueue. // @@ -468,24 +346,6 @@ private: overflow_t _overflow_stack; }; -template -bool OverflowTaskQueue::push(E t) -{ - if (!taskqueue_t::push(t)) { - overflow_stack()->push(t); - TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size())); - } - return true; -} - -template -bool OverflowTaskQueue::pop_overflow(E& t) -{ - if (overflow_empty()) return false; - t = overflow_stack()->pop(); - return true; -} - class TaskQueueSetSuper { protected: static int randomParkAndMiller(int* seed0); @@ -506,13 +366,7 @@ private: public: typedef typename T::element_type E; - GenericTaskQueueSet(int n) : _n(n) { - typedef T* GenericTaskQueuePtr; - _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F); - for (int i = 0; i < n; i++) { - _queues[i] = NULL; - } - } + GenericTaskQueueSet(int n); bool steal_best_of_2(uint queue_num, int* seed, E& t); @@ -541,40 +395,6 @@ GenericTaskQueueSet::queue(uint i) { return _queues[i]; } -template bool -GenericTaskQueueSet::steal(uint queue_num, int* seed, E& t) { - for (uint i = 0; i < 2 * _n; i++) { - if (steal_best_of_2(queue_num, seed, t)) { - TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true)); - return true; - } - } - TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false)); - return false; -} - -template bool -GenericTaskQueueSet::steal_best_of_2(uint queue_num, int* seed, E& t) { - if (_n > 2) { - uint k1 = queue_num; - while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; - uint k2 = queue_num; - while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; - // Sample both and try the larger. - uint sz1 = _queues[k1]->size(); - uint sz2 = _queues[k2]->size(); - if (sz2 > sz1) return _queues[k2]->pop_global(t); - else return _queues[k1]->pop_global(t); - } else if (_n == 2) { - // Just try the other one. - uint k = (queue_num + 1) % 2; - return _queues[k]->pop_global(t); - } else { - assert(_n == 1, "can't be zero."); - return false; - } -} - template bool GenericTaskQueueSet::peek() { // Try all the queues. @@ -649,65 +469,6 @@ public: #endif }; -template inline bool -GenericTaskQueue::push(E t) { - uint localBot = _bottom; - assert(localBot < N, "_bottom out of range."); - idx_t top = _age.top(); - uint dirty_n_elems = dirty_size(localBot, top); - assert(dirty_n_elems < N, "n_elems out of range."); - if (dirty_n_elems < max_elems()) { - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void) const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); - TASKQUEUE_STATS_ONLY(stats.record_push()); - return true; - } else { - return push_slow(t, dirty_n_elems); - } -} - -template inline bool -GenericTaskQueue::pop_local(volatile E& t) { - uint localBot = _bottom; - // This value cannot be N-1. That can only occur as a result of - // the assignment to bottom in this method. 
If it does, this method - // resets the size to 0 before the next call (which is sequential, - // since this is pop_local.) - uint dirty_n_elems = dirty_size(localBot, _age.top()); - assert(dirty_n_elems != N - 1, "Shouldn't be possible..."); - if (dirty_n_elems == 0) return false; - localBot = decrement_index(localBot); - _bottom = localBot; - // This is necessary to prevent any read below from being reordered - // before the store just above. - OrderAccess::fence(); - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void) const_cast(t = _elems[localBot]); - // This is a second read of "age"; the "size()" above is the first. - // If there's still at least one element in the queue, based on the - // "_bottom" and "age" we've read, then there can be no interference with - // a "pop_global" operation, and we're done. - idx_t tp = _age.top(); // XXX - if (size(localBot, tp) > 0) { - assert(dirty_size(localBot, tp) != N - 1, "sanity"); - TASKQUEUE_STATS_ONLY(stats.record_pop()); - return true; - } else { - // Otherwise, the queue contained exactly one element; we take the slow - // path. - return pop_local_slow(localBot, _age.get()); - } -} - typedef GenericTaskQueue OopTaskQueue; typedef GenericTaskQueueSet OopTaskQueueSet; diff --git a/hotspot/src/share/vm/utilities/taskqueue.inline.hpp b/hotspot/src/share/vm/utilities/taskqueue.inline.hpp new file mode 100644 index 00000000000..9f4eb3ea773 --- /dev/null +++ b/hotspot/src/share/vm/utilities/taskqueue.inline.hpp @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP +#define SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP + +#include "memory/allocation.inline.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/debug.hpp" +#include "utilities/taskqueue.hpp" +#include "utilities/stack.inline.hpp" +#include "runtime/atomic.inline.hpp" +#include "runtime/orderAccess.inline.hpp" + +template +inline GenericTaskQueueSet::GenericTaskQueueSet(int n) : _n(n) { + typedef T* GenericTaskQueuePtr; + _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F); + for (int i = 0; i < n; i++) { + _queues[i] = NULL; + } +} + +template +inline void GenericTaskQueue::initialize() { + _elems = _array_allocator.allocate(N); +} + +template +inline GenericTaskQueue::~GenericTaskQueue() { + FREE_C_HEAP_ARRAY(E, _elems); +} + +template +bool GenericTaskQueue::push_slow(E t, uint dirty_n_elems) { + if (dirty_n_elems == N - 1) { + // Actually means 0, so do the push. + uint localBot = _bottom; + // g++ complains if the volatile result of the assignment is + // unused, so we cast the volatile away. We cannot cast directly + // to void, because gcc treats that as not using the result of the + // assignment. However, casting to E& means that we trigger an + // unused-value warning. So, we cast the E& to void. + (void)const_cast(_elems[localBot] = t); + OrderAccess::release_store(&_bottom, increment_index(localBot)); + TASKQUEUE_STATS_ONLY(stats.record_push()); + return true; + } + return false; +} + +template inline bool +GenericTaskQueue::push(E t) { + uint localBot = _bottom; + assert(localBot < N, "_bottom out of range."); + idx_t top = _age.top(); + uint dirty_n_elems = dirty_size(localBot, top); + assert(dirty_n_elems < N, "n_elems out of range."); + if (dirty_n_elems < max_elems()) { + // g++ complains if the volatile result of the assignment is + // unused, so we cast the volatile away. We cannot cast directly + // to void, because gcc treats that as not using the result of the + // assignment. However, casting to E& means that we trigger an + // unused-value warning. So, we cast the E& to void. + (void) const_cast(_elems[localBot] = t); + OrderAccess::release_store(&_bottom, increment_index(localBot)); + TASKQUEUE_STATS_ONLY(stats.record_push()); + return true; + } else { + return push_slow(t, dirty_n_elems); + } +} + +template +inline bool OverflowTaskQueue::push(E t) +{ + if (!taskqueue_t::push(t)) { + overflow_stack()->push(t); + TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size())); + } + return true; +} + +// pop_local_slow() is done by the owning thread and is trying to +// get the last task in the queue. It will compete with pop_global() +// that will be used by other threads. The tag age is incremented +// whenever the queue goes empty which it will do here if this thread +// gets the last task or in pop_global() if the queue wraps (top == 0 +// and pop_global() succeeds, see pop_global()). +template +bool GenericTaskQueue::pop_local_slow(uint localBot, Age oldAge) { + // This queue was observed to contain exactly one element; either this + // thread will claim it, or a competing "pop_global". In either case, + // the queue will be logically empty afterwards. Create a new Age value + // that represents the empty queue for the given value of "_bottom". (We + // must also increment "tag" because of the case where "bottom == 1", + // "top == 0". A pop_global could read the queue element in that case, + // then have the owner thread do a pop followed by another push. 
Without + // the incrementing of "tag", the pop_global's CAS could succeed, + // allowing it to believe it has claimed the stale element.) + Age newAge((idx_t)localBot, oldAge.tag() + 1); + // Perhaps a competing pop_global has already incremented "top", in which + // case it wins the element. + if (localBot == oldAge.top()) { + // No competing pop_global has yet incremented "top"; we'll try to + // install new_age, thus claiming the element. + Age tempAge = _age.cmpxchg(newAge, oldAge); + if (tempAge == oldAge) { + // We win. + assert(dirty_size(localBot, _age.top()) != N - 1, "sanity"); + TASKQUEUE_STATS_ONLY(stats.record_pop_slow()); + return true; + } + } + // We lose; a completing pop_global gets the element. But the queue is empty + // and top is greater than bottom. Fix this representation of the empty queue + // to become the canonical one. + _age.set(newAge); + assert(dirty_size(localBot, _age.top()) != N - 1, "sanity"); + return false; +} + +template inline bool +GenericTaskQueue::pop_local(volatile E& t) { + uint localBot = _bottom; + // This value cannot be N-1. That can only occur as a result of + // the assignment to bottom in this method. If it does, this method + // resets the size to 0 before the next call (which is sequential, + // since this is pop_local.) + uint dirty_n_elems = dirty_size(localBot, _age.top()); + assert(dirty_n_elems != N - 1, "Shouldn't be possible..."); + if (dirty_n_elems == 0) return false; + localBot = decrement_index(localBot); + _bottom = localBot; + // This is necessary to prevent any read below from being reordered + // before the store just above. + OrderAccess::fence(); + // g++ complains if the volatile result of the assignment is + // unused, so we cast the volatile away. We cannot cast directly + // to void, because gcc treats that as not using the result of the + // assignment. However, casting to E& means that we trigger an + // unused-value warning. So, we cast the E& to void. + (void) const_cast(t = _elems[localBot]); + // This is a second read of "age"; the "size()" above is the first. + // If there's still at least one element in the queue, based on the + // "_bottom" and "age" we've read, then there can be no interference with + // a "pop_global" operation, and we're done. + idx_t tp = _age.top(); // XXX + if (size(localBot, tp) > 0) { + assert(dirty_size(localBot, tp) != N - 1, "sanity"); + TASKQUEUE_STATS_ONLY(stats.record_pop()); + return true; + } else { + // Otherwise, the queue contained exactly one element; we take the slow + // path. + return pop_local_slow(localBot, _age.get()); + } +} + +template +bool OverflowTaskQueue::pop_overflow(E& t) +{ + if (overflow_empty()) return false; + t = overflow_stack()->pop(); + return true; +} + +template +bool GenericTaskQueue::pop_global(volatile E& t) { + Age oldAge = _age.get(); + // Architectures with weak memory model require a barrier here + // to guarantee that bottom is not older than age, + // which is crucial for the correctness of the algorithm. +#if !(defined SPARC || defined IA32 || defined AMD64) + OrderAccess::fence(); +#endif + uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom); + uint n_elems = size(localBot, oldAge.top()); + if (n_elems == 0) { + return false; + } + + // g++ complains if the volatile result of the assignment is + // unused, so we cast the volatile away. We cannot cast directly + // to void, because gcc treats that as not using the result of the + // assignment. However, casting to E& means that we trigger an + // unused-value warning. 
So, we cast the E& to void. + (void) const_cast(t = _elems[oldAge.top()]); + Age newAge(oldAge); + newAge.increment(); + Age resAge = _age.cmpxchg(newAge, oldAge); + + // Note that using "_bottom" here might fail, since a pop_local might + // have decremented it. + assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity"); + return resAge == oldAge; +} + +template bool +GenericTaskQueueSet::steal_best_of_2(uint queue_num, int* seed, E& t) { + if (_n > 2) { + uint k1 = queue_num; + while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; + uint k2 = queue_num; + while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; + // Sample both and try the larger. + uint sz1 = _queues[k1]->size(); + uint sz2 = _queues[k2]->size(); + if (sz2 > sz1) return _queues[k2]->pop_global(t); + else return _queues[k1]->pop_global(t); + } else if (_n == 2) { + // Just try the other one. + uint k = (queue_num + 1) % 2; + return _queues[k]->pop_global(t); + } else { + assert(_n == 1, "can't be zero."); + return false; + } +} + +template bool +GenericTaskQueueSet::steal(uint queue_num, int* seed, E& t) { + for (uint i = 0; i < 2 * _n; i++) { + if (steal_best_of_2(queue_num, seed, t)) { + TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true)); + return true; + } + } + TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false)); + return false; +} + +template +inline typename TaskQueueSuper::Age TaskQueueSuper::Age::cmpxchg(const Age new_age, const Age old_age) volatile { + return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data, + (volatile intptr_t *)&_data, + (intptr_t)old_age._data); +} + +template +inline void GenericTaskQueue::oops_do(OopClosure* f) { + // tty->print_cr("START OopTaskQueue::oops_do"); + uint iters = size(); + uint index = _bottom; + for (uint i = 0; i < iters; ++i) { + index = decrement_index(index); + // tty->print_cr(" doing entry %d," INTPTR_T " -> " INTPTR_T, + // index, &_elems[index], _elems[index]); + E* t = (E*)&_elems[index]; // cast away volatility + oop* p = (oop*)t; + assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t))); + f->do_oop(p); + } + // tty->print_cr("END OopTaskQueue::oops_do"); +} + + +#endif // SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP From f171cd3a863083047e7d5b9d3dd57a80e5223af7 Mon Sep 17 00:00:00 2001 From: Stefan Karlsson Date: Mon, 27 Apr 2015 09:51:13 +0200 Subject: [PATCH 10/19] 8078601: print_concurrent_locks should be guarded with INCLUDE_SERVICES Reviewed-by: mgronlun, sla, dholmes --- hotspot/src/share/vm/runtime/thread.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp index 19721c5650d..1257944206f 100644 --- a/hotspot/src/share/vm/runtime/thread.cpp +++ b/hotspot/src/share/vm/runtime/thread.cpp @@ -4210,13 +4210,13 @@ void Threads::print_on(outputStream* st, bool print_stacks, Abstract_VM_Version::vm_info_string()); st->cr(); -#if INCLUDE_ALL_GCS +#if INCLUDE_SERVICES // Dump concurrent locks ConcurrentLocksDump concurrent_locks; if (print_concurrent_locks) { concurrent_locks.dump_at_safepoint(); } -#endif // INCLUDE_ALL_GCS +#endif // INCLUDE_SERVICES ALL_JAVA_THREADS(p) { ResourceMark rm; @@ -4229,11 +4229,11 @@ void Threads::print_on(outputStream* st, bool print_stacks, } } st->cr(); -#if INCLUDE_ALL_GCS +#if INCLUDE_SERVICES if (print_concurrent_locks) { concurrent_locks.print_locks_on(p, st); } -#endif // INCLUDE_ALL_GCS +#endif 
// INCLUDE_SERVICES } VMThread::vm_thread()->print_on(st); From e08169c2530d89d1cb1adcc50d32cbebd6ed47df Mon Sep 17 00:00:00 2001 From: Thomas Schatzl Date: Mon, 27 Apr 2015 10:04:26 +0200 Subject: [PATCH 11/19] 8073632: Make auxiliary data structures know their own translation factor Auxiliary data structures should have knowledge of their own requirements for virtual memory reservations instead of getting these values directly from various places. Reviewed-by: stefank, kbarrett --- .../vm/gc_implementation/g1/concurrentMark.hpp | 5 +++++ .../vm/gc_implementation/g1/g1BlockOffsetTable.hpp | 7 ++++++- .../share/vm/gc_implementation/g1/g1CardCounts.cpp | 13 ++++++++++++- .../share/vm/gc_implementation/g1/g1CardCounts.hpp | 8 ++++++++ .../vm/gc_implementation/g1/g1CollectedHeap.cpp | 12 ++++++------ .../g1/g1SATBCardTableModRefBS.hpp | 5 +++++ 6 files changed, 42 insertions(+), 8 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp index 3702273c959..f9c76d920a5 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -139,6 +139,11 @@ class CMBitMap : public CMBitMapRO { static size_t compute_size(size_t heap_size); // Returns the amount of bytes on the heap between two marks in the bitmap. static size_t mark_distance(); + // Returns how many bytes (or bits) of the heap a single byte (or bit) of the + // mark bitmap corresponds to. This is the same as the mark distance above. + static size_t heap_map_factor() { + return mark_distance(); + } CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp index 3b81b08894a..ada7bf0b8d6 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -179,6 +179,11 @@ public: return ReservedSpace::allocation_align_size_up(number_of_slots); } + // Returns how many bytes of the heap a single byte of the BOT corresponds to. + static size_t heap_map_factor() { + return N_bytes; + } + enum SomePublicConstants { LogN = 9, LogN_words = LogN - LogHeapWordSize, diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp index 94f258afd6f..440c7fbc256 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,17 @@ void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_re _counts->clear_range(mr); } +size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) { + // We keep card counts for every card, so the size of the card counts table must + // be the same as the card table. + return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words); +} + +size_t G1CardCounts::heap_map_factor() { + // See G1CardCounts::compute_size() why we reuse the card table value. + return G1SATBCardTableLoggingModRefBS::heap_map_factor(); +} + void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { if (has_count_table()) { assert(from_card_num < to_card_num, diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp index 6922818a790..5cb8d85099a 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp @@ -101,6 +101,14 @@ class G1CardCounts: public CHeapObj { public: G1CardCounts(G1CollectedHeap* g1h); + // Return the number of slots needed for a card counts table + // that covers mem_region_words words. + static size_t compute_size(size_t mem_region_size_in_words); + + // Returns how many bytes of the heap a single byte of the card counts table + // corresponds to. + static size_t heap_map_factor(); + void initialize(G1RegionToSpaceMapper* mapper); // Increments the refinement count for the given card. diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 68e4815d732..266a5c3e9ae 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1890,24 +1890,24 @@ jint G1CollectedHeap::initialize() { G1RegionToSpaceMapper* bot_storage = create_aux_memory_mapper("Block offset table", G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize), - G1BlockOffsetSharedArray::N_bytes); + G1BlockOffsetSharedArray::heap_map_factor()); ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize)); G1RegionToSpaceMapper* cardtable_storage = create_aux_memory_mapper("Card table", G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize), - G1BlockOffsetSharedArray::N_bytes); + G1SATBCardTableLoggingModRefBS::heap_map_factor()); G1RegionToSpaceMapper* card_counts_storage = create_aux_memory_mapper("Card counts table", - G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize), - G1BlockOffsetSharedArray::N_bytes); + G1CardCounts::compute_size(g1_rs.size() / HeapWordSize), + G1CardCounts::heap_map_factor()); size_t bitmap_size = CMBitMap::compute_size(g1_rs.size()); G1RegionToSpaceMapper* prev_bitmap_storage = - create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance()); + create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::heap_map_factor()); G1RegionToSpaceMapper* next_bitmap_storage = - create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance()); + create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::heap_map_factor()); _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); g1_barrier_set()->initialize(cardtable_storage); diff --git 
a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp index c307815782d..2abdf30d237 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp @@ -153,6 +153,11 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS { return ReservedSpace::allocation_align_size_up(number_of_slots); } + // Returns how many bytes of the heap a single byte of the Card Table corresponds to. + static size_t heap_map_factor() { + return CardTableModRefBS::card_size; + } + G1SATBCardTableLoggingModRefBS(MemRegion whole_heap); virtual void initialize() { } From 62049de8e0e8ed8b39af619d241405c9be160fb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ingemar=20=C3=85berg?= Date: Tue, 28 Apr 2015 07:59:18 +0200 Subject: [PATCH 12/19] 8074016: Add convenient way of adding custom test targets to hotspot's test makefile Moved the line including the custom testlist to after variables are defined, allowing custom rules to use them Reviewed-by: erikj, sla --- hotspot/test/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hotspot/test/Makefile b/hotspot/test/Makefile index 93c4787d679..8cc0296e822 100644 --- a/hotspot/test/Makefile +++ b/hotspot/test/Makefile @@ -28,8 +28,6 @@ ALT_MAKE ?= closed --include $(ALT_MAKE)/Makefile - GETMIXEDPATH=echo # Utilities used @@ -304,6 +302,8 @@ else endif JTREG_BASIC_OPTIONS += $(JTREG_KEY_OPTION) +-include $(ALT_MAKE)/Makefile + # Make sure jtreg exists $(JTREG): $(JT_HOME) From e207b18f1082d999a45f9fccd65df75c55363079 Mon Sep 17 00:00:00 2001 From: Sangheon Kim Date: Tue, 28 Apr 2015 12:02:50 -0700 Subject: [PATCH 13/19] 8073204: Determining the desired PLAB size adjusts to the number of threads at the wrong place Calculate the desired PLAB value for a single thread and then return desired PLAB size according to the current number of threads when needed Reviewed-by: ysr, jwilhelm, tschatzl --- .../vm/gc_implementation/g1/g1Allocator.cpp | 6 ++--- .../vm/gc_implementation/g1/g1Allocator.hpp | 4 +-- .../gc_implementation/g1/g1CollectedHeap.cpp | 2 +- .../gc_implementation/g1/g1CollectedHeap.hpp | 2 +- .../g1/g1CollectedHeap.inline.hpp | 2 +- .../parNew/parNewGeneration.cpp | 6 ++++- .../parNew/parNewGeneration.hpp | 4 +-- .../vm/gc_implementation/shared/plab.cpp | 16 +++++++++--- .../vm/gc_implementation/shared/plab.hpp | 25 +++++++++---------- 9 files changed, 38 insertions(+), 29 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp index e8b3bbc3a05..2c51df2f861 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp @@ -83,7 +83,7 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) &_retained_old_gc_alloc_region); } -void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { +void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) { AllocationContext_t context = AllocationContext::current(); evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() + old_gc_alloc_region(context)->count()); @@ -99,8 +99,8 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat } if (ResizePLAB) { -
_g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers); - _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers); + _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(); + _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(); } } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp index 2830a80ae91..4d63e96cb38 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp @@ -53,7 +53,7 @@ public: virtual void release_mutator_alloc_region() = 0; virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0; - virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0; + virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0; virtual void abandon_gc_alloc_regions() = 0; virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0; @@ -114,7 +114,7 @@ public: virtual void release_mutator_alloc_region(); virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info); - virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info); + virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info); virtual void abandon_gc_alloc_regions(); virtual bool is_retained_old_region(HeapRegion* hr) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 266a5c3e9ae..34a9a5ff498 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -5438,7 +5438,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { phase_times->record_string_dedup_fixup_time(fixup_time_ms); } - _allocator->release_gc_alloc_regions(n_workers, evacuation_info); + _allocator->release_gc_alloc_regions(evacuation_info); g1_rem_set()->cleanup_after_oops_into_collection_set_do(); // Reset and re-enable the hot card cache. diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index f3f0338a087..c41aedfa4d3 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -276,7 +276,7 @@ private: void init_gc_alloc_regions(EvacuationInfo& evacuation_info); // It releases the GC alloc regions at the end of a GC. - void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info); + void release_gc_alloc_regions(EvacuationInfo& evacuation_info); // It does any cleanup that needs to be done on the GC alloc regions // before a Full GC. 
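[Editor's note: the signature changes above are the substance of 8073204: the stats object latches one net desired PLAB size at the end of a GC, and each caller derives a per-thread share from whatever the active worker count is at the moment of use. A compact model of that split; ToyPlabStats and its member names are invented for illustration, not HotSpot code.]

    #include <algorithm>
    #include <cstddef>

    struct ToyPlabStats {
      size_t desired_net_plab_sz;                 // latched once per GC

      // New scheme: divide by the worker count current at the call site.
      size_t desired_plab_sz(unsigned active_workers) const {
        return desired_net_plab_sz / std::max(active_workers, 1u);
      }

      // End-of-GC update, computed for a single notional thread; the old
      // code divided by the worker count here, baking in a possibly stale
      // divisor for the whole next cycle.
      void adjust_desired_plab_sz(size_t used, size_t target_refills) {
        desired_net_plab_sz = used / std::max<size_t>(target_refills, 1);
      }
    };

[A caller would pass the live count, mirroring the patch's G1 change where desired_plab_sz(dest) now reads workers()->active_workers() on demand.]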
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp index fd4f138fbcb..cc2e1f1c586 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @@ -48,7 +48,7 @@ PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) { } size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) { - size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(); + size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers()); // Prevent humongous PLAB sizes for two reasons: // * PLABs are allocated using a similar paths as oops, but should // never be in a humongous region diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp index 107c807238b..32f3d55938e 100644 --- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @@ -1032,7 +1032,7 @@ void ParNewGeneration::collect(bool full, to()->set_concurrent_iteration_safe_limit(to()->top()); if (ResizePLAB) { - plab_stats()->adjust_desired_plab_sz(n_workers); + plab_stats()->adjust_desired_plab_sz(); } if (PrintGC && !PrintGCDetails) { @@ -1070,6 +1070,10 @@ void ParNewGeneration::collect(bool full, _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); } +size_t ParNewGeneration::desired_plab_sz() { + return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers()); +} + static int sum; void ParNewGeneration::waste_some_time() { for (int i = 0; i < 100; i++) { diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp index cf6ca9a82e8..d429aef0f39 100644 --- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp @@ -411,9 +411,7 @@ class ParNewGeneration: public DefNewGeneration { return &_plab_stats; } - size_t desired_plab_sz() { - return _plab_stats.desired_plab_sz(); - } + size_t desired_plab_sz(); const ParNewTracer* gc_tracer() const { return &_gc_tracer; diff --git a/hotspot/src/share/vm/gc_implementation/shared/plab.cpp b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp index 93eb5e94f14..758cee5361f 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/plab.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp @@ -109,10 +109,17 @@ void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) { } } -// Compute desired plab size and latch result for later +// Calculates plab size for current number of gc worker threads. +size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) { + assert(no_of_gc_workers > 0, "Number of GC workers should be larger than zero"); + + return align_object_size(_desired_net_plab_sz / MAX2(no_of_gc_workers, 1U)); +} + +// Compute desired plab size for one gc worker thread and latch result for later // use. This should be called once at the end of parallel // scavenge; it clears the sensor accumulators. 
-void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) { +void PLABStats::adjust_desired_plab_sz() { assert(ResizePLAB, "Not set"); assert(is_object_aligned(max_size()) && min_size() <= max_size(), @@ -135,7 +142,8 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) { target_refills = 1; } size_t used = _allocated - _wasted - _unused; + // Computed assuming one gc worker thread - size_t recent_plab_sz = used / (target_refills * no_of_gc_workers); + size_t recent_plab_sz = used / target_refills; // Take historical weighted average _filter.sample(recent_plab_sz); // Clip from above and below, and align to object boundary @@ -146,7 +154,7 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) { if (PrintPLAB) { gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz); } - _desired_plab_sz = new_plab_sz; + _desired_net_plab_sz = new_plab_sz; reset(); } diff --git a/hotspot/src/share/vm/gc_implementation/shared/plab.hpp b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp index 3660f16e610..65b9550d8bd 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/plab.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp @@ -150,13 +150,13 @@ public: // PLAB book-keeping. class PLABStats VALUE_OBJ_CLASS_SPEC { - size_t _allocated; // Total allocated - size_t _wasted; // of which wasted (internal fragmentation) - size_t _undo_wasted; // of which wasted on undo (is not used for calculation of PLAB size) - size_t _unused; // Unused in last buffer - size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized + size_t _allocated; // Total allocated + size_t _wasted; // of which wasted (internal fragmentation) + size_t _undo_wasted; // of which wasted on undo (is not used for calculation of PLAB size) + size_t _unused; // Unused in last buffer + size_t _desired_net_plab_sz; // Output of filter (below), suitably trimmed and quantized AdaptiveWeightedAverage - _filter; // Integrator with decay + _filter; // Integrator with decay void reset() { _allocated = 0; @@ -165,12 +165,12 @@ class PLABStats VALUE_OBJ_CLASS_SPEC { _unused = 0; } public: - PLABStats(size_t desired_plab_sz_, unsigned wt) : + PLABStats(size_t desired_net_plab_sz_, unsigned wt) : _allocated(0), _wasted(0), _undo_wasted(0), _unused(0), - _desired_plab_sz(desired_plab_sz_), + _desired_net_plab_sz(desired_net_plab_sz_), _filter(wt) { } @@ -182,13 +182,12 @@ class PLABStats VALUE_OBJ_CLASS_SPEC { return PLAB::max_size(); } - size_t desired_plab_sz() { - return _desired_plab_sz; - } + // Calculates plab size for current number of gc worker threads. + size_t desired_plab_sz(uint no_of_gc_workers); - // Updates the current desired PLAB size. Computes the new desired PLAB size for one gc worker thread, // updates _desired_net_plab_sz and clears sensor accumulators.
- void adjust_desired_plab_sz(uint no_of_gc_workers); + void adjust_desired_plab_sz(); void add_allocated(size_t v) { Atomic::add_ptr(v, &_allocated); From a9a83a919f6abb951fc995fa1207528a807f237d Mon Sep 17 00:00:00 2001 From: Derek White Date: Wed, 29 Apr 2015 09:59:51 +0200 Subject: [PATCH 14/19] 8076995: gc/ergonomics/TestDynamicNumberOfGCThreads.java failed with java.lang.RuntimeException: 'new_active_workers' missing from stdout/stderr Reviewed-by: brutisso, jmasa, jwilhelm --- .../shared/adaptiveSizePolicy.cpp | 3 ++- hotspot/test/TEST.groups | 3 +++ .../ergonomics/TestDynamicNumberOfGCThreads.java | 16 +++++++++++++--- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp index dfce8b559be..f84efc73d83 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp @@ -193,8 +193,9 @@ int AdaptiveSizePolicy::calc_active_workers(uintx total_workers, (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) { new_active_workers = total_workers; } else { + uintx min_workers = (total_workers == 1) ? 1 : 2; new_active_workers = calc_default_active_workers(total_workers, - 2, /* Minimum number of workers */ + min_workers, active_workers, application_workers); } diff --git a/hotspot/test/TEST.groups b/hotspot/test/TEST.groups index 5dba299ec1f..edc922ce470 100644 --- a/hotspot/test/TEST.groups +++ b/hotspot/test/TEST.groups @@ -233,6 +233,7 @@ needs_g1gc = \ gc/arguments/TestParallelGCThreads.java \ gc/arguments/TestUseCompressedOopsErgo.java \ gc/class_unloading/TestG1ClassUnloadingHWM.java \ + gc/ergonomics/TestDynamicNumberOfGCThreads.java \ gc/g1/ \ gc/metaspace/G1AddMetaspaceDependency.java \ gc/metaspace/TestMetaspacePerfCounters.java \ @@ -262,6 +263,7 @@ needs_parallelgc = \ gc/arguments/TestMinInitialErgonomics.java \ gc/arguments/TestParallelGCThreads.java \ gc/arguments/TestUseCompressedOopsErgo.java \ + gc/ergonomics/TestDynamicNumberOfGCThreads.java \ gc/metaspace/TestMetaspacePerfCounters.java \ gc/parallelScavenge/ \ gc/startup_warnings/TestParallelGC.java \ @@ -279,6 +281,7 @@ needs_cmsgc = \ gc/arguments/TestUseCompressedOopsErgo.java \ gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \ gc/concurrentMarkSweep/ \ + gc/ergonomics/TestDynamicNumberOfGCThreads.java \ gc/startup_warnings/TestCMS.java \ gc/startup_warnings/TestDefNewCMS.java \ gc/startup_warnings/TestParNewCMS.java diff --git a/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java b/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java index f6455761ada..f4a6625aae8 100644 --- a/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java +++ b/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java @@ -44,14 +44,24 @@ public class TestDynamicNumberOfGCThreads { } private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output) { + output.shouldHaveExitValue(0); // test should run successfully output.shouldContain("new_active_workers"); - output.shouldHaveExitValue(0); } private static void testDynamicNumberOfGCThreads(String gcFlag) throws Exception { // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled - ProcessBuilder pb_enabled = - ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-Xmx10M", "-XX:+PrintGCDetails", "-XX:+UseDynamicNumberOfGCThreads", "-XX:+TraceDynamicGCThreads", GCTest.class.getName()); +
String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+PrintGCDetails", "-XX:+UseDynamicNumberOfGCThreads", "-XX:+TraceDynamicGCThreads", GCTest.class.getName()}; + + // Base test with gc and +UseDynamicNumberOfGCThreads: + ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs); + verifyDynamicNumberOfGCThreads(new OutputAnalyzer(pb_enabled.start())); + + // Ensure it also works on uniprocessors or if user specifies -XX:ParallelGCThreads=1: + String[] extraArgs = {"-XX:+UnlockDiagnosticVMOptions", "-XX:+ForceDynamicNumberOfGCThreads", "-XX:ParallelGCThreads=1"}; + String[] finalArgs = new String[baseArgs.length + extraArgs.length]; + System.arraycopy(extraArgs, 0, finalArgs, 0, extraArgs.length); + System.arraycopy(baseArgs, 0, finalArgs, extraArgs.length, baseArgs.length); + pb_enabled = ProcessTools.createJavaProcessBuilder(finalArgs); verifyDynamicNumberOfGCThreads(new OutputAnalyzer(pb_enabled.start())); } From f10e4ec0a6bc088a76839efd87d8330195533cf3 Mon Sep 17 00:00:00 2001 From: Dmitry Fazunenko Date: Wed, 29 Apr 2015 15:32:05 +0400 Subject: [PATCH 15/19] 8073476: G1 logging ignores changes to PrintGC* flags via MXBeans Reviewed-by: brutisso, jwilhelm --- .../gc_implementation/g1/g1CollectedHeap.cpp | 2 ++ .../share/vm/gc_implementation/g1/g1Log.cpp | 36 +++++++++++++------ .../share/vm/gc_implementation/g1/g1Log.hpp | 5 ++- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 2aec852fd0d..2e2585cfc1a 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1167,6 +1167,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, SvcGCMarker sgcm(SvcGCMarker::FULL); ResourceMark rm; + G1Log::update_level(); print_heap_before_gc(); trace_heap_before_gc(gc_tracer); @@ -3649,6 +3650,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { SvcGCMarker sgcm(SvcGCMarker::MINOR); ResourceMark rm; + G1Log::update_level(); print_heap_before_gc(); trace_heap_before_gc(_gc_tracer_stw); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp index 56d957f76b4..dc3b4d1e64f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,17 +25,34 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1_globals.hpp" #include "gc_implementation/g1/g1Log.hpp" -#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" G1Log::LogLevel G1Log::_level = G1Log::LevelNone; -// If G1LogLevel has not been set up we will use the values of PrintGC -// and PrintGCDetails for the logging level. + +// Updates _level based on PrintGC and PrintGCDetails values (unless +// G1LogLevel is set explicitly) // - PrintGC maps to "fine". // - PrintGCDetails maps to "finer". 
+void G1Log::update_level() { + if (FLAG_IS_DEFAULT(G1LogLevel)) { + _level = LevelNone; + if (PrintGCDetails) { + _level = LevelFiner; + } else if (PrintGC) { + _level = LevelFine; + } + } +} + + +// If G1LogLevel has not been set up we will use the values of PrintGC +// and PrintGCDetails for the logging level. void G1Log::init() { - if (G1LogLevel != NULL && G1LogLevel[0] != '\0') { - if (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') { + if (!FLAG_IS_DEFAULT(G1LogLevel)) { + // PrintGC flag changes won't have any effect, because G1LogLevel + // is set explicitly + if (G1LogLevel[0] == '\0' || (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0')) { _level = LevelNone; } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') { _level = LevelFine; @@ -47,10 +64,7 @@ void G1Log::init() { warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel); } } else { - if (PrintGCDetails) { - _level = LevelFiner; - } else if (PrintGC) { - _level = LevelFine; - } + update_level(); } } + diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp index 6f72c8fbc8e..4bdc99b2af5 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1Log.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,6 +57,9 @@ class G1Log : public AllStatic { } static void init(); + + // Update the log level to reflect runtime changes to manageable flags + static void update_level(); }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP From c120339bf034171a670ccbddaa38f7a2dbc0de45 Mon Sep 17 00:00:00 2001 From: Eric Caspole Date: Wed, 29 Apr 2015 15:25:41 -0400 Subject: [PATCH 16/19] 8078405: Heap decommit failed in TestShrinkAuxiliaryData tests Modified test so options in the code are after the options from the environment, so -ExplicitGCInvokesConcurrent is always used.
Reviewed-by: tschatzl, kbarrett --- hotspot/test/gc/g1/TestShrinkAuxiliaryData.java | 3 +-- hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java | 2 +- hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java | 2 +- hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java | 2 +- hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java | 2 +- hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java | 2 +- hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java | 2 +- 7 files changed, 7 insertions(+), 8 deletions(-) diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java index 3145eb63854..cc24e599057 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java @@ -76,7 +76,6 @@ public class TestShrinkAuxiliaryData { printTestInfo(maxCacheSize); vmOpts.add("-XX:G1ConcRSLogCacheSize=" + hotCardTableSize); - vmOpts.addAll(Arrays.asList(Utils.getTestJavaOpts())); // for 32 bits ObjectAlignmentInBytes is not a option if (Platform.is32bit()) { @@ -98,7 +97,7 @@ public class TestShrinkAuxiliaryData { private void performTest(List opts) throws Exception { ProcessBuilder pb - = ProcessTools.createJavaProcessBuilder( + = ProcessTools.createJavaProcessBuilder(true, opts.toArray(new String[opts.size()]) ); diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java index 5a51d2368be..7042b708ce2 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData05 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java index 48a82b25dd1..db2ca528579 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData10 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java index 265302420c4..9460653764f 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData15 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java index 9cc4893e42d..3c0b1721f54 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData20 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java 
b/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java index 2cb0b0dc8ca..bb556c3ff08 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData25 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" diff --git a/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java b/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java index a8e6590be3d..87ee103cfdb 100644 --- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java +++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java @@ -23,7 +23,7 @@ /** * @test TestShrinkAuxiliaryData30 - * @bug 8038423 8061715 + * @bug 8038423 8061715 8078405 * @summary Checks that decommitment occurs for JVM with different * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values * @requires vm.gc=="G1" | vm.gc=="null" From 20689e5397e5aa6dcd0a32b7d4212cd409d24ca4 Mon Sep 17 00:00:00 2001 From: Stefan Johansson Date: Thu, 30 Apr 2015 10:14:26 +0200 Subject: [PATCH 17/19] 8078897: Clean out unused code in G1MMUTracker Reviewed-by: brutisso, jwilhelm, drwhite --- .../vm/gc_implementation/g1/g1MMUTracker.cpp | 40 +------------------ .../vm/gc_implementation/g1/g1MMUTracker.hpp | 5 +-- 2 files changed, 2 insertions(+), 43 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp index bd91c8fd29a..d1b38a733aa 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -76,9 +76,6 @@ double G1MMUTrackerQueue::calculate_gc_time(double current_time) { } void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) { - double longest_allowed = longest_pause_internal(start); - if (longest_allowed < 0.0) - longest_allowed = 0.0; double duration = end - start; remove_expired_entries(end); @@ -111,41 +108,6 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) { // this is for trying things out in the future and a couple // of other places (debugging) -double G1MMUTrackerQueue::longest_pause(double current_time) { - if (_DISABLE_MMU) - return _max_gc_time; - - MutexLockerEx x(MMUTracker_lock, Mutex::_no_safepoint_check_flag); - remove_expired_entries(current_time); - - return longest_pause_internal(current_time); -} - -double G1MMUTrackerQueue::longest_pause_internal(double current_time) { - double target_time = _max_gc_time; - - while( 1 ) { - double gc_time = - calculate_gc_time(current_time + target_time); - double diff = target_time + gc_time - _max_gc_time; - if (!is_double_leq_0(diff)) { - target_time -= diff; - if (is_double_leq_0(target_time)) { - target_time = -1.0; - break; - } - } else { - break; - } - } - - return target_time; -} - -// basically the _internal call does not remove expired entries -// this is for trying things out in the future and a couple -// of other places (debugging) - double G1MMUTrackerQueue::when_sec(double current_time, double pause_time) { if (_DISABLE_MMU) return 0.0; diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp index 956b23e0450..c4890d97dc0 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,6 @@ public: G1MMUTracker(double time_slice, double max_gc_time); virtual void add_pause(double start, double end, bool gc_thread) = 0; - virtual double longest_pause(double current_time) = 0; virtual double when_sec(double current_time, double pause_time) = 0; double max_gc_time() { @@ -122,7 +121,6 @@ private: void remove_expired_entries(double current_time); double calculate_gc_time(double current_time); - double longest_pause_internal(double current_time); double when_internal(double current_time, double pause_time); public: @@ -130,7 +128,6 @@ public: virtual void add_pause(double start, double end, bool gc_thread); - virtual double longest_pause(double current_time); virtual double when_sec(double current_time, double pause_time); }; From 0227995013d716235d9320a3010c4aab03034dce Mon Sep 17 00:00:00 2001 From: Kim Barrett Date: Fri, 1 May 2015 17:38:12 -0400 Subject: [PATCH 18/19] 8075215: SATB buffer processing found reclaimed humongous object Don't assume SATB buffer entries are valid objects Reviewed-by: brutisso, ecaspole --- .../gc_implementation/g1/concurrentMark.cpp | 49 +++++--- .../gc_implementation/g1/concurrentMark.hpp | 18 ++- .../g1/concurrentMark.inline.hpp | 114 +++++++++--------- .../vm/gc_implementation/g1/satbQueue.cpp | 43 ++++--- .../vm/gc_implementation/g1/satbQueue.hpp | 35 +++--- 5 files changed, 151 insertions(+), 108 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 4bc2e658e74..710ae7d2111 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -2552,31 +2552,50 @@ void ConcurrentMark::swapMarkBitMaps() { _nextMarkBitMap = (CMBitMap*) temp; } -class CMObjectClosure; - -// Closure for iterating over objects, currently only used for -// processing SATB buffers. -class CMObjectClosure : public ObjectClosure { +// Closure for marking entries in SATB buffers. +class CMSATBBufferClosure : public SATBBufferClosure { private: CMTask* _task; + G1CollectedHeap* _g1h; -public: - void do_object(oop obj) { - _task->deal_with_reference(obj); + // This is very similar to CMTask::deal_with_reference, but with + // more relaxed requirements for the argument, so this must be more + // circumspect about treating the argument as an object. + void do_entry(void* entry) const { + _task->increment_refs_reached(); + HeapRegion* hr = _g1h->heap_region_containing_raw(entry); + if (entry < hr->next_top_at_mark_start()) { + // Until we get here, we don't know whether entry refers to a valid + // object; it could instead have been a stale reference. 
+ oop obj = static_cast<oop>(entry); + assert(obj->is_oop(true /* ignore mark word */), + err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj))); + _task->make_reference_grey(obj, hr); + } } - CMObjectClosure(CMTask* task) : _task(task) { } +public: + CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h) + : _task(task), _g1h(g1h) { } + + virtual void do_buffer(void** buffer, size_t size) { + for (size_t i = 0; i < size; ++i) { + do_entry(buffer[i]); + } + } }; class G1RemarkThreadsClosure : public ThreadClosure { - CMObjectClosure _cm_obj; + CMSATBBufferClosure _cm_satb_cl; G1CMOopClosure _cm_cl; MarkingCodeBlobClosure _code_cl; int _thread_parity; public: G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) : - _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), + _cm_satb_cl(task, g1h), + _cm_cl(g1h, g1h->concurrent_mark(), task), + _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), _thread_parity(Threads::thread_claim_parity()) {} void do_thread(Thread* thread) { @@ -2592,11 +2611,11 @@ class G1RemarkThreadsClosure : public ThreadClosure { // live by the SATB invariant but other oops recorded in nmethods may behave differently. jt->nmethods_do(&_code_cl); - jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj); + jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl); } } else if (thread->is_VM_thread()) { if (thread->claim_oops_do(true, _thread_parity)) { - JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj); + JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); } } } @@ -3694,13 +3713,13 @@ void CMTask::drain_satb_buffers() { // very counter productive if it did that. :-) _draining_satb_buffers = true; - CMObjectClosure oc(this); + CMSATBBufferClosure satb_cl(this, _g1h); SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); // This keeps claiming and applying the closure to completed buffers // until we run out of buffers or we need to abort. while (!has_aborted() && - satb_mq_set.apply_closure_to_completed_buffer(&oc)) { + satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { if (_cm->verbose_medium()) { gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp index caedfce581e..78b24b6dc52 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -1098,9 +1098,9 @@ private: void regular_clock_call(); bool concurrent() { return _concurrent; } - // Test whether objAddr might have already been passed over by the + // Test whether obj might have already been passed over by the // mark bitmap scan, and so needs to be pushed onto the mark stack. - bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const; + bool is_below_finger(oop obj, HeapWord* global_finger) const; template <bool scan> void process_grey_object(oop obj); @@ -1151,8 +1151,18 @@ public: void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure); - // It grays the object by marking it and, if necessary, pushing it - // on the local queue + // Increment the number of references this task has visited. + void increment_refs_reached() { ++_refs_reached; } + + // Grey the object by marking it. If not already marked, push it on + // the local queue if below the finger. + // Precondition: obj is in region.
+ // Precondition: obj is below region's NTAMS. + inline void make_reference_grey(oop obj, HeapRegion* region); + + // Grey the object (by calling make_reference_grey) if required, + // e.g. obj is below its containing region's NTAMS. + // Precondition: obj is a valid heap object. inline void deal_with_reference(oop obj); // It scans an object and visits its children. diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp index 3d00aec250d..61e627c21fa 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp @@ -260,15 +260,15 @@ inline void CMTask::push(oop obj) { ++_local_pushes ); } -inline bool CMTask::is_below_finger(HeapWord* objAddr, - HeapWord* global_finger) const { - // If objAddr is above the global finger, then the mark bitmap scan +inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const { + // If obj is above the global finger, then the mark bitmap scan // will find it later, and no push is needed. Similarly, if we have - // a current region and objAddr is between the local finger and the + // a current region and obj is between the local finger and the // end of the current region, then no push is needed. The tradeoff // of checking both vs only checking the global finger is that the // local check will be more accurate and so result in fewer pushes, // but may also be a little slower. + HeapWord* objAddr = (HeapWord*)obj; if (_finger != NULL) { // We have a current region. @@ -278,7 +278,7 @@ inline bool CMTask::is_below_finger(HeapWord* objAddr, assert(_region_limit != NULL, "invariant"); assert(_region_limit <= global_finger, "invariant"); - // True if objAddr is less than the local finger, or is between + // True if obj is less than the local finger, or is between // the region limit and the global finger. if (objAddr < _finger) { return true; @@ -290,13 +290,65 @@ inline bool CMTask::is_below_finger(HeapWord* objAddr, return objAddr < global_finger; } +inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) { + if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) { + + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%u] marked object " PTR_FORMAT, + _worker_id, p2i(obj)); + } + + // No OrderAccess::store_load() is needed. It is implicit in the + // CAS done in CMBitMap::parMark() call in the routine above. + HeapWord* global_finger = _cm->finger(); + + // We only need to push a newly grey object on the mark + // stack if it is in a section of memory the mark bitmap + // scan has already examined. Mark bitmap scanning + // maintains progress "fingers" for determining that. + // + // Notice that the global finger might be moving forward + // concurrently. This is not a problem. In the worst case, we + // mark the object while it is above the global finger and, by + // the time we read the global finger, it has moved forward + // past this object. In this case, the object will probably + // be visited when a task is scanning the region and will also + // be pushed on the stack. So, some duplicate work, but no + // correctness problems. + if (is_below_finger(obj, global_finger)) { + if (obj->is_typeArray()) { + // Immediately process arrays of primitive types, rather + // than pushing on the mark stack.
This keeps us from + // adding humongous objects to the mark stack that might + // be reclaimed before the entry is processed - see + // selection of candidates for eager reclaim of humongous + // objects. The cost of the additional type test is + // mitigated by avoiding a trip through the mark stack, + // by only doing a bookkeeping update and avoiding the + // actual scan of the object - a typeArray contains no + // references, and the metadata is built-in. + process_grey_object<false>(obj); + } else { + if (_cm->verbose_high()) { + gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT + ", global: " PTR_FORMAT ") pushing " + PTR_FORMAT " on mark stack", + _worker_id, p2i(_finger), + p2i(global_finger), p2i(obj)); + } + push(obj); + } + } + } +} + inline void CMTask::deal_with_reference(oop obj) { if (_cm->verbose_high()) { gclog_or_tty->print_cr("[%u] we're dealing with reference = "PTR_FORMAT, _worker_id, p2i((void*) obj)); } - ++_refs_reached; + increment_refs_reached(); HeapWord* objAddr = (HeapWord*) obj; assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj))); @@ -308,55 +360,7 @@ inline void CMTask::deal_with_reference(oop obj) { // anything with it). HeapRegion* hr = _g1h->heap_region_containing_raw(obj); if (!hr->obj_allocated_since_next_marking(obj)) { - if (_cm->verbose_high()) { - gclog_or_tty->print_cr("[%u] "PTR_FORMAT" is not considered marked", - _worker_id, p2i((void*) obj)); - } - - // we need to mark it first - if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) { - // No OrderAccess:store_load() is needed. It is implicit in the - // CAS done in CMBitMap::parMark() call in the routine above. - HeapWord* global_finger = _cm->finger(); - - // We only need to push a newly grey object on the mark - // stack if it is in a section of memory the mark bitmap - // scan has already examined. Mark bitmap scanning - // maintains progress "fingers" for determining that. - // - // Notice that the global finger might be moving forward - // concurrently. This is not a problem. In the worst case, we - // mark the object while it is above the global finger and, by - // the time we read the global finger, it has moved forward - // past this object. In this case, the object will probably - // be visited when a task is scanning the region and will also - // be pushed on the stack. So, some duplicate work, but no - // correctness problems. - if (is_below_finger(objAddr, global_finger)) { - if (obj->is_typeArray()) { - // Immediately process arrays of primitive types, rather - // than pushing on the mark stack. This keeps us from - // adding humongous objects to the mark stack that might - // be reclaimed before the entry is processed - see - // selection of candidates for eager reclaim of humongous - // objects. The cost of the additional type test is - // mitigated by avoiding a trip through the mark stack, - // by only doing a bookkeeping update and avoiding the - // actual scan of the object - a typeArray contains no - // references, and the metadata is built-in.
- process_grey_object<false>(obj); - } else { - if (_cm->verbose_high()) { - gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT - ", global: " PTR_FORMAT ") pushing " - PTR_FORMAT " on mark stack", - _worker_id, p2i(_finger), - p2i(global_finger), p2i(objAddr)); - } - push(obj); - } - } - } + make_reference_grey(obj, hr); } } } diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp index b3bbf570cbe..ddaa28cf140 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp @@ -29,6 +29,7 @@ #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" @@ -160,10 +161,7 @@ bool ObjPtrQueue::should_enqueue_buffer() { assert(_lock == NULL || _lock->owned_by_self(), "we should have taken the lock before calling this"); - // Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to - // filter the buffer given that this will remove any references into - // the CSet as we currently assume that no such refs will appear in - // enqueued buffers. + // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering. // This method should only be called if there is a non-NULL buffer // that is full. @@ -180,25 +178,19 @@ bool ObjPtrQueue::should_enqueue_buffer() { return should_enqueue; } -void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) { +void ObjPtrQueue::apply_closure_and_empty(SATBBufferClosure* cl) { + assert(SafepointSynchronize::is_at_safepoint(), + "SATB queues must only be processed at safepoints"); if (_buf != NULL) { - apply_closure_to_buffer(cl, _buf, _index, _sz); + assert(_index % sizeof(void*) == 0, "invariant"); + assert(_sz % sizeof(void*) == 0, "invariant"); + assert(_index <= _sz, "invariant"); + cl->do_buffer(_buf + byte_index_to_index((int)_index), + byte_index_to_index((int)(_sz - _index))); _index = _sz; } } -void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl, - void** buf, size_t index, size_t sz) { - if (cl == NULL) return; - for (size_t i = index; i < sz; i += oopSize) { - oop obj = (oop)buf[byte_index_to_index((int)i)]; - // There can be NULL entries because of destructors. - if (obj != NULL) { - cl->do_object(obj); - } - } -} - #ifndef PRODUCT // Helpful for debugging @@ -289,7 +281,7 @@ void SATBMarkQueueSet::filter_thread_buffers() { shared_satb_queue()->filter(); } -bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) { +bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) { BufferNode* nd = NULL; { MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); @@ -303,7 +295,18 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) { } if (nd != NULL) { void **buf = BufferNode::make_buffer_from_node(nd); - ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz); + // Skip over NULL entries at beginning (e.g. push end) of buffer. + // Filtering can result in non-full completed buffers; see + // should_enqueue_buffer. + assert(_sz % sizeof(void*) == 0, "invariant"); + size_t limit = ObjPtrQueue::byte_index_to_index((int)_sz); + for (size_t i = 0; i < limit; ++i) { + if (buf[i] != NULL) { + // Found the end of the block of NULLs; process the remainder.
+ cl->do_buffer(buf + i, limit - i); + break; + } + } deallocate_buffer(buf); return true; } else { diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp index 596904d06df..5948959190f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp @@ -25,29 +25,30 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP +#include "memory/allocation.hpp" #include "gc_implementation/g1/ptrQueue.hpp" -class ObjectClosure; class JavaThread; class SATBMarkQueueSet; +// Base class for processing the contents of a SATB buffer. +class SATBBufferClosure : public StackObj { +protected: + ~SATBBufferClosure() { } + +public: + // Process the SATB entries in the designated buffer range. + virtual void do_buffer(void** buffer, size_t size) = 0; +}; + // A ptrQueue whose elements are "oops", pointers to object heads. class ObjPtrQueue: public PtrQueue { - friend class Threads; friend class SATBMarkQueueSet; - friend class G1RemarkThreadsClosure; private: // Filter out unwanted entries from the buffer. void filter(); - // Apply the closure to all elements and empty the buffer; - void apply_closure_and_empty(ObjectClosure* cl); - - // Apply the closure to all elements of "buf", down to "index" (inclusive.) - static void apply_closure_to_buffer(ObjectClosure* cl, - void** buf, size_t index, size_t sz); - public: ObjPtrQueue(PtrQueueSet* qset, bool perm = false) : // SATB queues are only active during marking cycles. We create @@ -60,6 +61,10 @@ public: // Process queue entries and free resources. void flush(); + // Apply cl to the active part of the buffer. + // Prerequisite: Must be at a safepoint. + void apply_closure_and_empty(SATBBufferClosure* cl); + // Overrides PtrQueue::should_enqueue_buffer(). See the method's // definition for more information. virtual bool should_enqueue_buffer(); @@ -97,10 +102,12 @@ public: // Filter all the currently-active SATB buffers. void filter_thread_buffers(); - // If there exists some completed buffer, pop it, then apply the - // closure to all its elements, and return true. If no - // completed buffers exist, return false. - bool apply_closure_to_completed_buffer(ObjectClosure* closure); + // If there exists some completed buffer, pop and process it, and + // return true. Otherwise return false. Processing a buffer + // consists of applying the closure to the buffer range starting + // with the first non-NULL entry to the end of the buffer; the + // leading entries may be NULL due to filtering. 
+ bool apply_closure_to_completed_buffer(SATBBufferClosure* cl); #ifndef PRODUCT // Helpful for debugging From 8986235472e2cfc084baf7e031d1015e5d2408c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Per=20Lid=C3=A9n?= Date: Mon, 4 May 2015 09:24:03 +0200 Subject: [PATCH 19/19] 8079148: Fix incorrect include guards Reviewed-by: stefank, ehelin --- .../concurrentMarkSweep/adaptiveFreeList.hpp | 8 ++++---- .../src/share/vm/gc_implementation/g1/evacuationInfo.hpp | 8 ++++---- .../src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp | 6 +++--- .../share/vm/gc_implementation/g1/g1RootProcessor.hpp | 6 +++--- .../vm/gc_implementation/g1/heapRegionBounds.inline.hpp | 7 ++++++- .../gc_implementation/shared/objectCountEventSender.hpp | 8 ++++---- hotspot/src/share/vm/memory/guardedMemory.hpp | 8 ++++---- .../share/vm/memory/metaspaceChunkFreeListSummary.hpp | 9 +++++---- .../src/share/vm/memory/metaspaceGCThresholdUpdater.hpp | 8 ++++---- hotspot/src/share/vm/memory/metaspaceShared.hpp | 9 +++++---- hotspot/src/share/vm/memory/metaspaceTracer.hpp | 8 ++++---- hotspot/src/share/vm/memory/padded.inline.hpp | 7 ++++++- hotspot/src/share/vm/memory/referenceType.hpp | 8 ++++---- 13 files changed, 56 insertions(+), 44 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp index f2ae38f2568..337040b9cd4 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP -#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP +#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP +#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP #include "memory/freeList.hpp" #include "gc_implementation/shared/allocationStats.hpp" @@ -226,4 +226,4 @@ class AdaptiveFreeList : public FreeList { #endif // NOT PRODUCT }; -#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP +#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP diff --git a/hotspot/src/share/vm/gc_implementation/g1/evacuationInfo.hpp b/hotspot/src/share/vm/gc_implementation/g1/evacuationInfo.hpp index 97e0ab2f735..06bbefff93e 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/evacuationInfo.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/evacuationInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP -#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_EVACUATIONINFO_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_EVACUATIONINFO_HPP #include "memory/allocation.hpp" @@ -78,4 +78,4 @@ public: uint regions_freed() { return _regions_freed; } }; -#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_EVACUATIONINFO_HPP diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp index 323ecf691d3..1ab1ba62871 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP -#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMES_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMES_HPP #include "memory/allocation.hpp" @@ -286,4 +286,4 @@ public: ~G1GCParPhaseTimesTracker(); }; -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMES_HPP diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp index 38287e784ba..29e9df5664a 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP -#define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ROOTPROCESSOR_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ROOTPROCESSOR_HPP #include "memory/allocation.hpp" #include "memory/strongRootsScope.hpp" @@ -118,4 +118,4 @@ public: void set_num_workers(int active_workers); }; -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ROOTPROCESSOR_HPP diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp index 9ffeed4512a..061d1a30712 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,9 @@ * */ +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_INLINE_HPP + #include "gc_implementation/g1/heapRegionBounds.hpp" size_t HeapRegionBounds::min_size() { @@ -35,3 +38,5 @@ size_t HeapRegionBounds::max_size() { size_t HeapRegionBounds::target_number() { return TARGET_REGION_NUMBER; } + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_INLINE_HPP diff --git a/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp b/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp index b68e86dea8a..c814f5e6cd2 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP -#define SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_OBJECTCOUNTEVENTSENDER_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_OBJECTCOUNTEVENTSENDER_HPP #include "gc_implementation/shared/gcTrace.hpp" #include "memory/allocation.hpp" @@ -42,4 +42,4 @@ class ObjectCountEventSender : public AllStatic { #endif // INCLUDE_SERVICES -#endif // SHARE_VM_OBJECT_COUNT_EVENT_SENDER +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_OBJECTCOUNTEVENTSENDER_HPP diff --git a/hotspot/src/share/vm/memory/guardedMemory.hpp b/hotspot/src/share/vm/memory/guardedMemory.hpp index 0d37bb03361..e3b1ed8e57e 100644 --- a/hotspot/src/share/vm/memory/guardedMemory.hpp +++ b/hotspot/src/share/vm/memory/guardedMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP -#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP +#ifndef SHARE_VM_MEMORY_GUARDEDMEMORY_HPP +#define SHARE_VM_MEMORY_GUARDEDMEMORY_HPP #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" @@ -323,4 +323,4 @@ protected: #endif }; // GuardedMemory -#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP +#endif // SHARE_VM_MEMORY_GUARDEDMEMORY_HPP diff --git a/hotspot/src/share/vm/memory/metaspaceChunkFreeListSummary.hpp b/hotspot/src/share/vm/memory/metaspaceChunkFreeListSummary.hpp index bc262f6a19d..0119302ed5c 100644 --- a/hotspot/src/share/vm/memory/metaspaceChunkFreeListSummary.hpp +++ b/hotspot/src/share/vm/memory/metaspaceChunkFreeListSummary.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,8 +21,9 @@ * questions. 
* */ -#ifndef SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP -#define SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP + +#ifndef SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP +#define SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP #include "memory/allocation.hpp" @@ -100,4 +101,4 @@ class MetaspaceChunkFreeListSummary VALUE_OBJ_CLASS_SPEC { } }; -#endif // SHARE_VM_MEMORY_METASPACE_CHUNK_FREE_LIST_SUMMARY_HPP +#endif // SHARE_VM_MEMORY_METASPACECHUNKFREELISTSUMMARY_HPP diff --git a/hotspot/src/share/vm/memory/metaspaceGCThresholdUpdater.hpp b/hotspot/src/share/vm/memory/metaspaceGCThresholdUpdater.hpp index cbb221dd33b..d692ccb7e85 100644 --- a/hotspot/src/share/vm/memory/metaspaceGCThresholdUpdater.hpp +++ b/hotspot/src/share/vm/memory/metaspaceGCThresholdUpdater.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP -#define SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP +#ifndef SHARE_VM_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP +#define SHARE_VM_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP #include "memory/allocation.hpp" #include "utilities/debug.hpp" @@ -49,4 +49,4 @@ class MetaspaceGCThresholdUpdater : public AllStatic { } }; -#endif // SHARE_VM_MEMORY_METASPACE_GC_THRESHOLD_UPDATER_HPP +#endif // SHARE_VM_MEMORY_METASPACEGCTHRESHOLDUPDATER_HPP diff --git a/hotspot/src/share/vm/memory/metaspaceShared.hpp b/hotspot/src/share/vm/memory/metaspaceShared.hpp index 85bf0e4a303..d4b86cda5b9 100644 --- a/hotspot/src/share/vm/memory/metaspaceShared.hpp +++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,8 +21,9 @@ * questions. * */ -#ifndef SHARE_VM_MEMORY_METASPACE_SHARED_HPP -#define SHARE_VM_MEMORY_METASPACE_SHARED_HPP + +#ifndef SHARE_VM_MEMORY_METASPACESHARED_HPP +#define SHARE_VM_MEMORY_METASPACESHARED_HPP #include "classfile/compactHashtable.hpp" #include "memory/allocation.hpp" @@ -153,4 +154,4 @@ class MetaspaceShared : AllStatic { static int count_class(const char* classlist_file); static void estimate_regions_size() NOT_CDS_RETURN; }; -#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP +#endif // SHARE_VM_MEMORY_METASPACESHARED_HPP diff --git a/hotspot/src/share/vm/memory/metaspaceTracer.hpp b/hotspot/src/share/vm/memory/metaspaceTracer.hpp index 4ae0138d581..48289545791 100644 --- a/hotspot/src/share/vm/memory/metaspaceTracer.hpp +++ b/hotspot/src/share/vm/memory/metaspaceTracer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_MEMORY_METASPACE_TRACER_HPP -#define SHARE_VM_MEMORY_METASPACE_TRACER_HPP +#ifndef SHARE_VM_MEMORY_METASPACETRACER_HPP +#define SHARE_VM_MEMORY_METASPACETRACER_HPP #include "memory/allocation.hpp" #include "memory/metaspace.hpp" @@ -52,4 +52,4 @@ class MetaspaceTracer : public CHeapObj { }; -#endif // SHARE_VM_MEMORY_METASPACE_TRACER_HPP +#endif // SHARE_VM_MEMORY_METASPACETRACER_HPP diff --git a/hotspot/src/share/vm/memory/padded.inline.hpp b/hotspot/src/share/vm/memory/padded.inline.hpp index 1e4f8858460..53404f3909b 100644 --- a/hotspot/src/share/vm/memory/padded.inline.hpp +++ b/hotspot/src/share/vm/memory/padded.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,9 @@ * */ +#ifndef SHARE_VM_MEMORY_PADDED_INLINE_HPP +#define SHARE_VM_MEMORY_PADDED_INLINE_HPP + #include "memory/allocation.inline.hpp" #include "memory/padded.hpp" #include "utilities/debug.hpp" @@ -86,3 +89,5 @@ T* PaddedPrimitiveArray::create_unfreeable(size_t length) { return (T*)align_pointer_up(chunk, alignment); } + +#endif // SHARE_VM_MEMORY_PADDED_INLINE_HPP diff --git a/hotspot/src/share/vm/memory/referenceType.hpp b/hotspot/src/share/vm/memory/referenceType.hpp index 6ce944c0e70..a54e8238e87 100644 --- a/hotspot/src/share/vm/memory/referenceType.hpp +++ b/hotspot/src/share/vm/memory/referenceType.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_MEMORY_REFRERENCETYPE_HPP -#define SHARE_VM_MEMORY_REFRERENCETYPE_HPP +#ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP +#define SHARE_VM_MEMORY_REFERENCETYPE_HPP #include "utilities/debug.hpp" @@ -39,4 +39,4 @@ enum ReferenceType { REF_CLEANER // Subclass of sun/misc/Cleaner }; -#endif // SHARE_VM_MEMORY_REFRERENCETYPE_HPP +#endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
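Patch 13 in this series changes PLAB sizing from latching a per-worker value to latching a net value that each query divides by the number of GC workers active in the current pause. A rough sketch of that arithmetic follows; align_object_size and the surrounding names are simplified stand-ins for this sketch, not the VM's own code, and the 2-word alignment is an assumption:

    #include <cstddef>
    #include <algorithm>

    // Stand-in for HotSpot's align_object_size: rounds a size in heap words
    // up to the object alignment (assumed to be 2 words for this sketch).
    static size_t align_object_size(size_t words) {
      const size_t alignment = 2;
      return (words + alignment - 1) & ~(alignment - 1);
    }

    // Mirrors the shape of PLABStats::desired_plab_sz(uint) after the patch:
    // one net size is stored, and each caller divides it by the current
    // number of GC worker threads.
    static size_t desired_plab_sz(size_t desired_net_plab_sz,
                                  unsigned no_of_gc_workers) {
      return align_object_size(desired_net_plab_sz /
                               std::max(no_of_gc_workers, 1u));
    }

For example, a latched net size of 4096 words split across 8 active workers yields a 512-word PLAB per worker; because the division happens at query time, the same net value adapts when the dynamic worker count changes between pauses.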
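The invariant behind patch 18 is that an SATB buffer entry may be treated as an object only if it lies below its region's next-top-at-mark-start (NTAMS); entries at or above NTAMS were allocated during the current marking cycle and may be stale, so they must not be dereferenced. A compact sketch of that check, using a hypothetical stand-in type rather than the VM's HeapRegion:

    // Hypothetical stand-in for the two HeapRegion fields the check needs.
    struct Region {
      const void* bottom; // first word of the region
      const void* ntams;  // next_top_at_mark_start()
    };

    // An entry is safe to treat as an oop only when it is inside the region
    // and below NTAMS; skipping everything else is what keeps stale
    // references to reclaimed (e.g. humongous) objects from being touched.
    static bool satb_entry_needs_processing(const Region& r, const void* entry) {
      return entry >= r.bottom && entry < r.ntams;
    }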
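The guard renames in the final patch all follow the same HotSpot convention: the macro is the header's path below src/share/vm, upper-cased, with directory separators and the .hpp suffix turned into underscores. A minimal sketch of the resulting pattern, using the referenceType.hpp guard fixed above (the declarations themselves are elided):

    // Include-guard convention after 8079148: the macro mirrors the path, so
    // hotspot/src/share/vm/memory/referenceType.hpp guards as follows.
    #ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP
    #define SHARE_VM_MEMORY_REFERENCETYPE_HPP

    // ... declarations for this header go here ...

    #endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP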