8146690: Make all classes in GC follow the naming convention

Reviewed-by: dholmes, stefank
David Lindholm 2016-01-14 13:26:19 +01:00
parent 49d61bdeb6
commit ad0c208a5a
77 changed files with 411 additions and 411 deletions
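
The pattern throughout the patch is mechanical: underscore-separated HotSpot class names become CamelCase, and call sites, comments, and log messages are updated to match. A rough summary of the renames visible in the hunks below, written as C++ comments for reference (this summary is editorial, not part of the patch itself):

// GC_locker                   -> GCLocker
// No_Safepoint_Verifier       -> NoSafepointVerifier
// Pause_No_Safepoint_Verifier -> PauseNoSafepointVerifier
// Par_MarkRefsIntoClosure, Par_PushAndMarkClosure, Par_MarkRefsIntoAndScanClosure,
// Par_PushOrMarkClosure, Par_MarkFromRootsClosure, Par_ConcMarkingClosure
//                             -> same names with the underscore after "Par" dropped
//                                (ParMarkRefsIntoClosure, ParPushAndMarkClosure, ...)
// FreeListSpace_DCTOC         -> FreeListSpaceDCTOC, Filtering_DCTOC -> FilteringDCTOC
// CFLS_LAB                    -> CompactibleFreeListSpaceLAB
//
// A typical call-site change:
//   if (GC_locker::is_active()) { ... }   // before
//   if (GCLocker::is_active()) { ... }    // after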

View file

@@ -1075,7 +1075,7 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
}
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1257,14 +1257,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimtiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that they GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
-// if (GC_locker::needs_gc())
+// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// tranistion to thread_in_native
// unpack arrray arguments and call native entry point

View file

@@ -1474,7 +1474,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
}
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1486,9 +1486,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
VMRegPair* in_regs,
BasicType* in_sig_bt,
Register tmp_reg ) {
-__ block_comment("check GC_locker::needs_gc");
+__ block_comment("check GCLocker::needs_gc");
Label cont;
-__ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
+__ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
__ cmplwi(CCR0, tmp_reg, 0);
__ beq(CCR0, cont);
@@ -1687,14 +1687,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimtiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that they GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
-// if (GC_locker::needs_gc())
+// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// tranistion to thread_in_native
// unpack arrray arguments and call native entry point

View file

@@ -1748,7 +1748,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1759,9 +1759,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
-__ block_comment("check GC_locker::needs_gc");
+__ block_comment("check GCLocker::needs_gc");
Label cont;
-AddressLiteral sync_state(GC_locker::needs_gc_address());
+AddressLiteral sync_state(GCLocker::needs_gc_address());
__ load_bool_contents(sync_state, G3_scratch);
__ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
__ delayed()->nop();
@@ -1936,14 +1936,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimtiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that they GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
-// if (GC_locker::needs_gc())
+// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// tranistion to thread_in_native
// unpack arrray arguments and call native entry point

View file

@@ -1271,7 +1271,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
}
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1284,9 +1284,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
-__ block_comment("check GC_locker::needs_gc");
+__ block_comment("check GCLocker::needs_gc");
Label cont;
-__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+__ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
@@ -1469,14 +1469,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimtiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that they GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
-// if (GC_locker::needs_gc())
+// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// tranistion to thread_in_native
// unpack arrray arguments and call native entry point

View file

@@ -1416,7 +1416,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1428,9 +1428,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
-__ block_comment("check GC_locker::needs_gc");
+__ block_comment("check GCLocker::needs_gc");
Label cont;
-__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+__ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
@@ -1795,14 +1795,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimtiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that they GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
-// if (GC_locker::needs_gc())
+// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// tranistion to thread_in_native
// unpack arrray arguments and call native entry point

View file

@@ -510,7 +510,7 @@ static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
}
void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
GrowableArray<oop> oops;

View file

@@ -971,7 +971,7 @@ void ciEnv::register_method(ciMethod* target,
// and invalidating our dependencies until we install this method.
// No safepoints are allowed. Otherwise, class redefinition can occur in between.
MutexLocker ml(Compile_lock);
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
// Change in Jvmti state may invalidate compilation.
if (!failing() && jvmti_state_changed()) {

View file

@@ -863,7 +863,7 @@ void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
initialize_hashtable(interface_names);
bool dup = false;
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
for (index = 0; index < itfs_len; index++) {
const Klass* const k = _local_interfaces->at(index);
const Symbol* const name = InstanceKlass::cast(k)->name();
@@ -1620,7 +1620,7 @@ void ClassFileParser::parse_fields(const ClassFileStream* const cfs,
initialize_hashtable(names_and_sigs);
bool dup = false;
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
const Symbol* const name = fs.name();
const Symbol* const sig = fs.signature();
@@ -2885,7 +2885,7 @@ void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
initialize_hashtable(names_and_sigs);
bool dup = false;
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
for (int i = 0; i < length; i++) {
const Method* const m = _methods->at(i);
// If no duplicates, add name/signature in hashtable names_and_sigs.

View file

@@ -574,7 +574,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
// actual ClassLoaderData object.
ClassLoaderData::Dependencies dependencies(CHECK_NULL);
-No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
+NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains unhandled oops

View file

@@ -1536,7 +1536,7 @@ class BacktraceBuilder: public StackObj {
objArrayOop _mirrors;
typeArrayOop _cprefs; // needed to insulate method name against redefinition
int _index;
-No_Safepoint_Verifier _nsv;
+NoSafepointVerifier _nsv;
public:
@@ -1595,7 +1595,7 @@ class BacktraceBuilder: public StackObj {
void expand(TRAPS) {
objArrayHandle old_head(THREAD, _head);
-Pause_No_Safepoint_Verifier pnsv(&_nsv);
+PauseNoSafepointVerifier pnsv(&_nsv);
objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
objArrayHandle new_head(THREAD, head);

View file

@@ -136,7 +136,7 @@ oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
assert(java_lang_String::equals(string(), name, len),
"string must be properly initialized");
// Cannot hit a safepoint in this function because the "this" pointer can move.
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
// Check if the symbol table has been rehashed, if so, need to recalculate
// the hash value and index before second lookup.

View file

@@ -264,7 +264,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
unsigned int hashValue;
char* name;
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
name = (char*)sym->base() + begin;
len = end - begin;
@@ -288,7 +288,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
buffer[i] = name[i];
}
// Make sure there is no safepoint in the code above since name can't move.
-// We can't include the code in No_Safepoint_Verifier because of the
+// We can't include the code in NoSafepointVerifier because of the
// ResourceMark.
// Grab SymbolTable_lock first.
@@ -405,7 +405,7 @@ Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
}
// Cannot hit a safepoint in this function because the "this" pointer can move.
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
// Check if the symbol table has been rehashed, if so, need to recalculate
// the hash value and index.
@@ -454,7 +454,7 @@ bool SymbolTable::basic_add(ClassLoaderData* loader_data, const constantPoolHand
}
// Cannot hit a safepoint in this function because the "this" pointer can move.
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
for (int i=0; i<names_count; i++) {
// Check if the symbol table has been rehashed, if so, need to recalculate

View file

@@ -475,11 +475,11 @@ void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
-// We're using a No_Safepoint_Verifier to catch any place where we
+// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
-No_Safepoint_Verifier nosafepoint;
+NoSafepointVerifier nosafepoint;
dictionary()->add_protection_domain(d_index, d_hash, klass, loader_data,
protection_domain, THREAD);
}
@@ -908,11 +908,11 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
MutexLocker mu(SystemDictionary_lock, THREAD);
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
-// We're using a No_Safepoint_Verifier to catch any place where we
+// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
-No_Safepoint_Verifier nosafepoint;
+NoSafepointVerifier nosafepoint;
if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
loader_data,
protection_domain)) {
@@ -961,11 +961,11 @@ Klass* SystemDictionary::find(Symbol* class_name,
{
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
-// We're using a No_Safepoint_Verifier to catch any place where we
+// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
-No_Safepoint_Verifier nosafepoint;
+NoSafepointVerifier nosafepoint;
return dictionary()->find(d_index, d_hash, class_name, loader_data,
protection_domain, THREAD);
}
@@ -2210,7 +2210,7 @@ bool SystemDictionary::add_loader_constraint(Symbol* class_name,
MutexLocker mu_s(SystemDictionary_lock, THREAD);
// Better never do a GC while we're holding these oops
-No_Safepoint_Verifier nosafepoint;
+NoSafepointVerifier nosafepoint;
Klass* klass1 = find_class(d_index1, d_hash1, constraint_name, loader_data1);
Klass* klass2 = find_class(d_index2, d_hash2, constraint_name, loader_data2);

View file

@@ -2004,7 +2004,7 @@ bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
Symbol* field_name,
Symbol* field_sig,
bool is_method) {
-No_Safepoint_Verifier nosafepoint;
+NoSafepointVerifier nosafepoint;
// If target class isn't a super class of this class, we don't worry about this case
if (!this_class->is_subclass_of(target_class)) {

View file

@@ -1034,7 +1034,7 @@ int CodeCache::mark_for_deoptimization(DepChange& changes) {
// implementor.
// nmethod::check_all_dependencies works only correctly, if no safepoint
// can happen
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
Klass* d = str.klass();
number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);

View file

@@ -59,7 +59,7 @@ class CompileLog;
class DepChange;
class KlassDepChange;
class CallSiteDepChange;
-class No_Safepoint_Verifier;
+class NoSafepointVerifier;
class Dependencies: public ResourceObj {
public:
@@ -713,7 +713,7 @@ class DepChange : public StackObj {
: _changes(changes)
{ start(); }
-ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
+ContextStream(DepChange& changes, NoSafepointVerifier& nsv)
: _changes(changes)
// the nsv argument makes it safe to hold oops like _klass
{ start(); }

View file

@@ -692,7 +692,7 @@ nmethod::nmethod(
_native_basic_lock_sp_offset(basic_lock_sp_offset)
{
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults();
@@ -796,7 +796,7 @@ nmethod::nmethod(
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
-debug_only(No_Safepoint_Verifier nsv;)
+debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults();
@@ -1404,7 +1404,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this);
methodHandle the_method(method());
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
// during patching, depending on the nmethod state we must notify the GC that
// code has been unloaded, unregistering it. We cannot do this right while

View file

@@ -373,7 +373,7 @@ CompileTask* CompileQueue::get() {
CompileTask* task;
{
-No_Safepoint_Verifier nsv;
+NoSafepointVerifier nsv;
task = CompilationPolicy::policy()->select_task(this);
}

View file

@@ -37,7 +37,7 @@ class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
-class Par_MarkFromRootsClosure;
+class ParMarkFromRootsClosure;
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
@@ -82,14 +82,14 @@ class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
virtual void do_oop(narrowOop* p);
};
-class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
+class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _bitMap;
protected:
DO_OOP_WORK_DEFN
public:
-Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
+ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};
@@ -141,7 +141,7 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
+class ParPushAndMarkClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@@ -150,15 +150,15 @@ class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
protected:
DO_OOP_WORK_DEFN
public:
-Par_PushAndMarkClosure(CMSCollector* collector,
+ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
-inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
-inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
};
// The non-parallel version (the parallel version appears further below).
@@ -203,25 +203,25 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
-class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
+class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
private:
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
const uint _low_water_mark;
-Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ParPushAndMarkClosure _parPushAndMarkClosure;
protected:
DO_OOP_WORK_DEFN
public:
-Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
+ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
-inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
-inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
void trim_queue(uint size);
};
@@ -261,8 +261,8 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
-// the closure Par_MarkFromRootsClosure.
+// the closure ParMarkFromRootsClosure.
-class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
+class ParPushOrMarkClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
MemRegion _whole_span;
@@ -272,23 +272,23 @@ class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
CMSMarkStack* _overflow_stack;
HeapWord* const _finger;
HeapWord** const _global_finger_addr;
-Par_MarkFromRootsClosure* const
+ParMarkFromRootsClosure* const
_parent;
protected:
DO_OOP_WORK_DEFN
public:
-Par_PushOrMarkClosure(CMSCollector* cms_collector,
+ParPushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* mark_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
-Par_MarkFromRootsClosure* parent);
+ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
-inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
-inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);

View file

@@ -31,7 +31,7 @@
#include "oops/oop.inline.hpp"
// Trim our work_queue so its length is below max at return
-inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
+inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
@@ -40,7 +40,7 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
-newOop->oop_iterate(&_par_pushAndMarkClosure);
+newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}

View file

@@ -576,7 +576,7 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
}
}
-class FreeListSpace_DCTOC : public Filtering_DCTOC {
+class FreeListSpaceDCTOC : public FilteringDCTOC {
CompactibleFreeListSpace* _cfls;
CMSCollector* _collector;
bool _parallel;
@@ -596,21 +596,21 @@ protected:
walk_mem_region_with_cl_DECL(FilteringClosure);
public:
-FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
+FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
-Filtering_DCTOC(sp, cl, precision, boundary),
+FilteringDCTOC(sp, cl, precision, boundary),
_cfls(sp), _collector(collector), _parallel(parallel) {}
};
// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
-#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
+#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
@@ -620,7 +620,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
} \
} \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
@@ -647,7 +647,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
} \
} \
} \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
@@ -678,15 +678,15 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
+FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
+FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
-return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
+return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
}
@@ -2413,7 +2413,7 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
}
///////////////////////////////////////////////////////////////////////////
-// CFLS_LAB
+// CompactibleFreeListSpaceLAB
///////////////////////////////////////////////////////////////////////////
#define VECTOR_257(x) \
@@ -2432,12 +2432,12 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
// generic OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
-AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
+AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[] =
-VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
+VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
-size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
+size_t CompactibleFreeListSpaceLAB::_global_num_blocks[] = VECTOR_257(0);
-uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
+uint CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
-CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
+CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
_cfls(cfls)
{
assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
@@ -2451,7 +2451,7 @@ CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
static bool _CFLS_LAB_modified = false;
-void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
+void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
assert(!_CFLS_LAB_modified, "Call only once");
_CFLS_LAB_modified = true;
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
@@ -2461,7 +2461,7 @@ void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
}
}
-HeapWord* CFLS_LAB::alloc(size_t word_sz) {
+HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
FreeChunk* res;
assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
@@ -2491,7 +2491,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
// Get a chunk of blocks of the right size and update related
// book-keeping stats
-void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
+void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
// Get the #blocks we want to claim
size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
assert(n_blks > 0, "Error");
@@ -2525,7 +2525,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
_num_blocks[word_sz] += fl->count();
}
-void CFLS_LAB::compute_desired_plab_size() {
+void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
@@ -2551,7 +2551,7 @@ void CFLS_LAB::compute_desired_plab_size() {
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
-void CFLS_LAB::retire(int tid) {
+void CompactibleFreeListSpaceLAB::retire(int tid) {
// We run this single threaded with the world stopped;
// so no need for locks and such.
NOT_PRODUCT(Thread* t = Thread::current();)

View file

@@ -75,7 +75,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
friend class ConcurrentMarkSweepGeneration;
friend class CMSCollector;
// Local alloc buffer for promotion into this space.
-friend class CFLS_LAB;
+friend class CompactibleFreeListSpaceLAB;
// Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
template <typename SpaceType>
friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
@@ -662,7 +662,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
-class CFLS_LAB : public CHeapObj<mtGC> {
+class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
// The space that this buffer allocates into.
CompactibleFreeListSpace* _cfls;
@@ -686,7 +686,7 @@ public:
static const int _default_dynamic_old_plab_size = 16;
static const int _default_static_old_plab_size = 50;
-CFLS_LAB(CompactibleFreeListSpace* cfls);
+CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
// Allocate and return a block of the given size, or else return NULL.
HeapWord* alloc(size_t word_sz);

View file

@@ -183,7 +183,7 @@ NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
public:
-CFLS_LAB lab;
+CompactibleFreeListSpaceLAB lab;
PromotionInfo promo;
// Constructor.
@@ -1110,7 +1110,7 @@ bool ConcurrentMarkSweepGeneration::should_collect(bool full,
bool CMSCollector::shouldConcurrentCollect() {
if (_full_gc_requested) {
-log_trace(gc)("CMSCollector: collect because of explicit gc request (or gc_locker)");
+log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
return true;
}
@@ -1269,12 +1269,12 @@ void CMSCollector::collect(bool full,
{
// The following "if" branch is present for defensive reasons.
// In the current uses of this interface, it can be replaced with:
-// assert(!GC_locker.is_active(), "Can't be called otherwise");
+// assert(!GCLocker.is_active(), "Can't be called otherwise");
// But I am not placing that assert here to allow future
// generality in invoking this interface.
-if (GC_locker::is_active()) {
+if (GCLocker::is_active()) {
-// A consistency test for GC_locker
+// A consistency test for GCLocker
-assert(GC_locker::needs_gc(), "Should have been set already");
+assert(GCLocker::needs_gc(), "Should have been set already");
// Skip this foreground collection, instead
// expanding the heap if necessary.
// Need the free list locks for the call to free() in compute_new_size()
@@ -3272,7 +3272,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
// Do the marking work within a non-empty span --
// the last argument to the constructor indicates whether the
// iteration should be incremental with periodic yields.
-Par_MarkFromRootsClosure cl(this, _collector, my_span,
+ParMarkFromRootsClosure cl(this, _collector, my_span,
&_collector->_markBitMap,
work_queue(i),
&_collector->_markStack);
@@ -3291,7 +3291,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
pst->all_tasks_completed();
}
-class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
+class ParConcMarkingClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
CMSConcMarkingTask* _task;
@@ -3302,7 +3302,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
protected:
DO_OOP_WORK_DEFN
public:
-Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
+ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
MetadataAwareOopClosure(collector->ref_processor()),
_collector(collector),
@@ -3330,7 +3330,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
-void Par_ConcMarkingClosure::do_oop(oop obj) {
+void ParConcMarkingClosure::do_oop(oop obj) {
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
@@ -3366,10 +3366,10 @@ void Par_ConcMarkingClosure::do_oop(oop obj) {
}
}
-void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+void ParConcMarkingClosure::do_oop(oop* p) { ParConcMarkingClosure::do_oop_work(p); }
-void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
-void Par_ConcMarkingClosure::trim_queue(size_t max) {
+void ParConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
if (_work_queue->pop_local(new_oop)) {
@@ -3385,7 +3385,7 @@ void Par_ConcMarkingClosure::trim_queue(size_t max) {
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
-void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
+void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(),
@@ -3404,7 +3404,7 @@ void CMSConcMarkingTask::do_work_steal(int i) {
CMSBitMap* bm = &(_collector->_markBitMap);
CMSMarkStack* ovflw = &(_collector->_markStack);
int* seed = _collector->hash_seed(i);
-Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
+ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
while (true) {
cl.trim_queue(0);
assert(work_q->size() == 0, "Should have been emptied above");
@@ -4246,7 +4246,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
// ---------- scan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
-Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
+ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
// ---------- young gen roots --------------
{
@@ -4312,10 +4312,10 @@ class CMSParRemarkTask: public CMSParMarkTask {
private:
// ... of dirty cards in old space
void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
-Par_MarkRefsIntoAndScanClosure* cl);
+ParMarkRefsIntoAndScanClosure* cl);
// ... work stealing for the above
-void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
+void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};
class RemarkKlassClosure : public KlassClosure {
@@ -4361,7 +4361,7 @@ void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* c
}
// work_queue(i) is passed to the closure
-// Par_MarkRefsIntoAndScanClosure. The "i" parameter
+// ParMarkRefsIntoAndScanClosure. The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.
@@ -4373,7 +4373,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- rescan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
-Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
+ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
_collector->_span, _collector->ref_processor(),
&(_collector->_markBitMap),
work_queue(worker_id));
@@ -4522,7 +4522,7 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
CompactibleFreeListSpace* sp, int i,
-Par_MarkRefsIntoAndScanClosure* cl) {
+ParMarkRefsIntoAndScanClosure* cl) {
// Until all tasks completed:
// . claim an unclaimed task
// . compute region boundaries corresponding to task claimed
@@ -4614,7 +4614,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// . see if we can share work_queues with ParNew? XXX
void
-CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
+CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
@@ -5832,7 +5832,7 @@ void MarkRefsIntoClosure::do_oop(oop obj) {
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
-Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
+ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
MemRegion span, CMSBitMap* bitMap):
_span(span),
_bitMap(bitMap)
@@ -5841,7 +5841,7 @@ Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
-void Par_MarkRefsIntoClosure::do_oop(oop obj) {
+void ParMarkRefsIntoClosure::do_oop(oop obj) {
// if p points into _span, then mark corresponding bit in _markBitMap
assert(obj->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)obj;
@@ -5851,8 +5851,8 @@ void Par_MarkRefsIntoClosure::do_oop(oop obj) {
}
}
-void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(oop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
-void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
@@ -5989,10 +5989,10 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
}
///////////////////////////////////////////////////////////
-// Par_MarkRefsIntoAndScanClosure: a parallel version of
+// ParMarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
-Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
+ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
CMSBitMap* bit_map, OopTaskQueue* work_queue):
_span(span),
@@ -6000,7 +6000,7 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
_work_queue(work_queue),
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
-_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
+_parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
// FIXME: Should initialize in base class constructor.
assert(rp != NULL, "ref_processor shouldn't be NULL");
@@ -6014,7 +6014,7 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
if (obj != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
@@ -6041,8 +6041,8 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
}
}
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void ParMarkRefsIntoAndScanClosure::do_oop(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
// This closure is used to rescan the marked objects on the dirty cards // This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. // in the mod union table and the card table proper.
@ -6426,7 +6426,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
assert(_markStack->isEmpty(), "tautology, emphasizing post-condition"); assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
} }
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task, ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
CMSCollector* collector, MemRegion span, CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
@ -6449,7 +6449,7 @@ Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
// Should revisit to see if this should be restructured for // Should revisit to see if this should be restructured for
// greater efficiency. // greater efficiency.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) { bool ParMarkFromRootsClosure::do_bit(size_t offset) {
if (_skip_bits > 0) { if (_skip_bits > 0) {
_skip_bits--; _skip_bits--;
return true; return true;
@ -6474,7 +6474,7 @@ bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
return true; return true;
} }
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) { void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
assert(_bit_map->isMarked(ptr), "expected bit to be set"); assert(_bit_map->isMarked(ptr), "expected bit to be set");
// Should we assert that our work queue is empty or // Should we assert that our work queue is empty or
// below some drain limit? // below some drain limit?
@ -6524,7 +6524,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
// Note: the local finger doesn't advance while we drain // Note: the local finger doesn't advance while we drain
// the stack below, but the global finger sure can and will. // the stack below, but the global finger sure can and will.
HeapWord** gfa = _task->global_finger_addr(); HeapWord** gfa = _task->global_finger_addr();
Par_PushOrMarkClosure pushOrMarkClosure(_collector, ParPushOrMarkClosure pushOrMarkClosure(_collector,
_span, _bit_map, _span, _bit_map,
_work_queue, _work_queue,
_overflow_stack, _overflow_stack,
@ -6557,7 +6557,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
// Yield in response to a request from VM Thread or // Yield in response to a request from VM Thread or
// from mutators. // from mutators.
void Par_MarkFromRootsClosure::do_yield_work() { void ParMarkFromRootsClosure::do_yield_work() {
assert(_task != NULL, "sanity"); assert(_task != NULL, "sanity");
_task->yield(); _task->yield();
} }
@ -6684,14 +6684,14 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
_parent(parent) _parent(parent)
{ } { }
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
MemRegion span, MemRegion span,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
CMSMarkStack* overflow_stack, CMSMarkStack* overflow_stack,
HeapWord* finger, HeapWord* finger,
HeapWord** global_finger_addr, HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent) : ParMarkFromRootsClosure* parent) :
MetadataAwareOopClosure(collector->ref_processor()), MetadataAwareOopClosure(collector->ref_processor()),
_collector(collector), _collector(collector),
_whole_span(collector->_span), _whole_span(collector->_span),
@ -6729,7 +6729,7 @@ void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
// Upon stack overflow, we discard (part of) the stack, // Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded // remembering the least address amongst those discarded
// in CMSCollector's _restart_address. // in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other // We need to do this under a mutex to prevent other
// workers from interfering with the work done below. // workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(), MutexLockerEx ml(_overflow_stack->par_lock(),
@ -6776,7 +6776,7 @@ void PushOrMarkClosure::do_oop(oop obj) {
void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(oop obj) { void ParPushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators. // Ignore mark word because we are running concurrent with mutators.
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;
@ -6822,8 +6822,8 @@ void Par_PushOrMarkClosure::do_oop(oop obj) {
} }
} }
void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } void ParPushOrMarkClosure::do_oop(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span, MemRegion span,
@ -6900,7 +6900,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
} }
} }
Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector, ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span, MemRegion span,
ReferenceProcessor* rp, ReferenceProcessor* rp,
CMSBitMap* bit_map, CMSBitMap* bit_map,
@ -6919,7 +6919,7 @@ void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(
// Grey object rescan during second checkpoint phase -- // Grey object rescan during second checkpoint phase --
// the parallel version. // the parallel version.
void Par_PushAndMarkClosure::do_oop(oop obj) { void ParPushAndMarkClosure::do_oop(oop obj) {
// In the assert below, we ignore the mark word because // In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is // this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has // on the overflow stack (in which case the mark word has
@ -6959,8 +6959,8 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
} }
} }
void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } void ParPushAndMarkClosure::do_oop(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void CMSPrecleanRefsYieldClosure::do_yield_work() { void CMSPrecleanRefsYieldClosure::do_yield_work() {
Mutex* bml = _collector->bitMapLock(); Mutex* bml = _collector->bitMapLock();
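The ParMarkRefsIntoAndScanClosure hunks above note that the marking bit map is shared between parallel workers and updated via CAS, so that only one thread "wins" each object and pushes it onto its work queue. Below is a minimal standalone sketch of that claim-the-bit idiom; it uses std::atomic rather than HotSpot's bit map and Atomic primitives, and the class name and word layout are invented for illustration.

// Minimal standalone sketch (not HotSpot's CMSBitMap): claim a mark bit with a
// CAS so that concurrent workers can share one bit map and only the winning
// thread pushes the newly marked object onto its work queue for scanning.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class SharedBitMapSketch {
  std::vector<std::atomic<uint64_t>> _words;
public:
  explicit SharedBitMapSketch(size_t bits) : _words((bits + 63) / 64) {
    for (auto& w : _words) w.store(0, std::memory_order_relaxed);
  }
  // Returns true only for the one thread that actually set the bit.
  bool par_mark(size_t bit) {
    std::atomic<uint64_t>& word = _words[bit / 64];
    const uint64_t mask = uint64_t(1) << (bit % 64);
    uint64_t old = word.load(std::memory_order_relaxed);
    while ((old & mask) == 0) {
      if (word.compare_exchange_weak(old, old | mask,
                                     std::memory_order_acq_rel,
                                     std::memory_order_relaxed)) {
        return true;   // this thread marked the object; it should push/scan it
      }
      // compare_exchange_weak refreshed 'old'; loop and retry
    }
    return false;      // another worker marked it first; nothing to do
  }
};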


@ -510,17 +510,17 @@ class CMSCollector: public CHeapObj<mtGC> {
friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
friend class SurvivorSpacePrecleanClosure; // --- ditto ------- friend class SurvivorSpacePrecleanClosure; // --- ditto -------
friend class PushOrMarkClosure; // to access _restart_addr friend class PushOrMarkClosure; // to access _restart_addr
friend class Par_PushOrMarkClosure; // to access _restart_addr friend class ParPushOrMarkClosure; // to access _restart_addr
friend class MarkFromRootsClosure; // -- ditto -- friend class MarkFromRootsClosure; // -- ditto --
// ... and for clearing cards // ... and for clearing cards
friend class Par_MarkFromRootsClosure; // to access _restart_addr friend class ParMarkFromRootsClosure; // to access _restart_addr
// ... and for clearing cards // ... and for clearing cards
friend class Par_ConcMarkingClosure; // to access _restart_addr etc. friend class ParConcMarkingClosure; // to access _restart_addr etc.
friend class MarkFromRootsVerifyClosure; // to access _restart_addr friend class MarkFromRootsVerifyClosure; // to access _restart_addr
friend class PushAndMarkVerifyClosure; // -- ditto -- friend class PushAndMarkVerifyClosure; // -- ditto --
friend class MarkRefsIntoAndScanClosure; // to access _overflow_list friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
friend class PushAndMarkClosure; // -- ditto -- friend class PushAndMarkClosure; // -- ditto --
friend class Par_PushAndMarkClosure; // -- ditto -- friend class ParPushAndMarkClosure; // -- ditto --
friend class CMSKeepAliveClosure; // -- ditto -- friend class CMSKeepAliveClosure; // -- ditto --
friend class CMSDrainMarkingStackClosure; // -- ditto -- friend class CMSDrainMarkingStackClosure; // -- ditto --
friend class CMSInnerParMarkAndPushClosure; // -- ditto -- friend class CMSInnerParMarkAndPushClosure; // -- ditto --
@ -1282,7 +1282,7 @@ class MarkFromRootsClosure: public BitMapClosure {
// marking from the roots following the first checkpoint. // marking from the roots following the first checkpoint.
// XXX This should really be a subclass of The serial version // XXX This should really be a subclass of The serial version
// above, but i have not had the time to refactor things cleanly. // above, but i have not had the time to refactor things cleanly.
class Par_MarkFromRootsClosure: public BitMapClosure { class ParMarkFromRootsClosure: public BitMapClosure {
CMSCollector* _collector; CMSCollector* _collector;
MemRegion _whole_span; MemRegion _whole_span;
MemRegion _span; MemRegion _span;
@ -1295,7 +1295,7 @@ class Par_MarkFromRootsClosure: public BitMapClosure {
HeapWord* _threshold; HeapWord* _threshold;
CMSConcMarkingTask* _task; CMSConcMarkingTask* _task;
public: public:
Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
MemRegion span, MemRegion span,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
@ -1401,7 +1401,7 @@ class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
CMSBitMap* _bit_map; CMSBitMap* _bit_map;
union { union {
MarkRefsIntoAndScanClosure* _scan_closure; MarkRefsIntoAndScanClosure* _scan_closure;
Par_MarkRefsIntoAndScanClosure* _par_scan_closure; ParMarkRefsIntoAndScanClosure* _par_scan_closure;
}; };
public: public:
@ -1425,7 +1425,7 @@ class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
ReferenceProcessor* rp, ReferenceProcessor* rp,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
Par_MarkRefsIntoAndScanClosure* cl): ParMarkRefsIntoAndScanClosure* cl):
#ifdef ASSERT #ifdef ASSERT
_collector(collector), _collector(collector),
_span(span), _span(span),
@ -1470,7 +1470,7 @@ class MarkFromDirtyCardsClosure: public MemRegionClosure {
CompactibleFreeListSpace* space, CompactibleFreeListSpace* space,
CMSBitMap* bit_map, CMSBitMap* bit_map,
OopTaskQueue* work_queue, OopTaskQueue* work_queue,
Par_MarkRefsIntoAndScanClosure* cl): ParMarkRefsIntoAndScanClosure* cl):
_space(space), _space(space),
_num_dirty_cards(0), _num_dirty_cards(0),
_scan_cl(collector, span, collector->ref_processor(), bit_map, _scan_cl(collector, span, collector->ref_processor(), bit_map,


@ -381,7 +381,7 @@ inline void MarkFromRootsClosure::do_yield_check() {
} }
} }
inline void Par_MarkFromRootsClosure::do_yield_check() { inline void ParMarkFromRootsClosure::do_yield_check() {
if (ConcurrentMarkSweepThread::should_yield() && if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive()) { !_collector->foregroundGCIsActive()) {
do_yield_work(); do_yield_work();
@ -392,7 +392,7 @@ inline void PushOrMarkClosure::do_yield_check() {
_parent->do_yield_check(); _parent->do_yield_check();
} }
inline void Par_PushOrMarkClosure::do_yield_check() { inline void ParPushOrMarkClosure::do_yield_check() {
_parent->do_yield_check(); _parent->do_yield_check();
} }


@ -455,7 +455,7 @@ void ParScanThreadStateSet::flush() {
// Every thread has its own age table. We need to merge // Every thread has its own age table. We need to merge
// them all into one. // them all into one.
ageTable *local_table = par_scan_state.age_table(); AgeTable *local_table = par_scan_state.age_table();
_young_gen.age_table()->merge(local_table); _young_gen.age_table()->merge(local_table);
// Inform old gen that we're done. // Inform old gen that we're done.
@ -469,7 +469,7 @@ void ParScanThreadStateSet::flush() {
// to avoid this by reorganizing the code a bit, I am loathe // to avoid this by reorganizing the code a bit, I am loathe
// to do that unless we find cases where ergo leads to bad // to do that unless we find cases where ergo leads to bad
// performance. // performance.
CFLS_LAB::compute_desired_plab_size(); CompactibleFreeListSpaceLAB::compute_desired_plab_size();
} }
} }


@ -94,7 +94,7 @@ class ParScanThreadState {
int _hash_seed; int _hash_seed;
int _thread_num; int _thread_num;
ageTable _ageTable; AgeTable _ageTable;
bool _to_space_full; bool _to_space_full;
@ -132,7 +132,7 @@ class ParScanThreadState {
ParallelTaskTerminator& term_); ParallelTaskTerminator& term_);
public: public:
ageTable* age_table() {return &_ageTable;} AgeTable* age_table() {return &_ageTable;}
ObjToScanQueue* work_queue() { return _work_queue; } ObjToScanQueue* work_queue() { return _work_queue; }


@ -203,7 +203,7 @@ void VM_GenCollectFullConcurrent::doit() {
gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen); gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
} // Else no need for a foreground young gc } // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) || assert((_gc_count_before < gch->total_collections()) ||
(GC_locker::is_active() /* gc may have been skipped */ (GCLocker::is_active() /* gc may have been skipped */
&& (_gc_count_before == gch->total_collections())), && (_gc_count_before == gch->total_collections())),
"total_collections() should be monotonically increasing"); "total_collections() should be monotonically increasing");


@ -601,7 +601,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
return result; return result;
} }
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) { if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here, // No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true. // can_expand_young_list() does this when it returns true.
@ -617,7 +617,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// returns true). In this case we do not try this GC and // returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and // wait until the GCLocker initiated GC is performed, and
// then retry the allocation. // then retry the allocation.
if (GC_locker::needs_gc()) { if (GCLocker::needs_gc()) {
should_try_gc = false; should_try_gc = false;
} else { } else {
// Read the GC count while still holding the Heap_lock. // Read the GC count while still holding the Heap_lock.
@ -653,7 +653,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// The GCLocker is either active or the GCLocker initiated // The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and // GC has not yet been performed. Stall until it is and
// then retry the allocation. // then retry the allocation.
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
(*gclocker_retry_count_ret) += 1; (*gclocker_retry_count_ret) += 1;
} }
@ -1028,7 +1028,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
return result; return result;
} }
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
should_try_gc = false; should_try_gc = false;
} else { } else {
// The GCLocker may not be active but the GCLocker initiated // The GCLocker may not be active but the GCLocker initiated
@ -1036,7 +1036,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
// returns true). In this case we do not try this GC and // returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and // wait until the GCLocker initiated GC is performed, and
// then retry the allocation. // then retry the allocation.
if (GC_locker::needs_gc()) { if (GCLocker::needs_gc()) {
should_try_gc = false; should_try_gc = false;
} else { } else {
// Read the GC count while still holding the Heap_lock. // Read the GC count while still holding the Heap_lock.
@ -1076,7 +1076,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
// The GCLocker is either active or the GCLocker initiated // The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and // GC has not yet been performed. Stall until it is and
// then retry the allocation. // then retry the allocation.
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
(*gclocker_retry_count_ret) += 1; (*gclocker_retry_count_ret) += 1;
} }
@ -1211,7 +1211,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
bool clear_all_soft_refs) { bool clear_all_soft_refs) {
assert_at_safepoint(true /* should_be_vm_thread */); assert_at_safepoint(true /* should_be_vm_thread */);
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return false; return false;
} }
@ -2396,8 +2396,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
} }
if (retry_gc) { if (retry_gc) {
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
} }
} }
} }
@ -3629,7 +3629,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */); assert_at_safepoint(true /* should_be_vm_thread */);
guarantee(!is_gc_active(), "collection is not reentrant"); guarantee(!is_gc_active(), "collection is not reentrant");
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return false; return false;
} }
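The allocation paths in the hunks above all follow the same shape once the locker is involved: try the allocation, and if GCLocker::is_active_and_needs_gc(), either skip the GC attempt or stall until the locker-initiated GC has run, then retry and bump a retry counter. The sketch below shows only that control flow; LockerStub, try_allocate and the size limit are invented stand-ins and do not model G1's real policy.

// Standalone sketch of the stall-and-retry control flow in the hunks above.
// LockerStub and try_allocate are invented stand-ins; only the shape
// (allocate -> if the locker is active, stall, then retry) mirrors the diff.
#include <cstddef>
#include <optional>

struct LockerStub {
  bool is_active_and_needs_gc() const { return false; } // pretend nothing is pending
  void stall_until_clear() {}  // would block until the locker-initiated GC has run
};

std::optional<size_t> try_allocate(size_t word_size) {
  // Stand-in for a real allocation attempt: pretend only small requests fit.
  if (word_size > 1024) return std::nullopt;
  return word_size;
}

std::optional<size_t> allocate_with_gclocker_retry(LockerStub& locker,
                                                   size_t word_size,
                                                   int max_retries) {
  for (int retries = 0; retries <= max_retries; ++retries) {
    if (auto result = try_allocate(word_size)) {
      return result;                  // allocation succeeded
    }
    if (locker.is_active_and_needs_gc()) {
      locker.stall_until_clear();     // wait for the pending GC
      continue;                       // then retry the allocation
    }
    // Otherwise the real code would attempt a collection here before retrying.
  }
  return std::nullopt;                // give up after too many retries
}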


@ -841,7 +841,7 @@ private:
HeapRegion* _recorded_survivor_head; HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail; HeapRegion* _recorded_survivor_tail;
ageTable _survivors_age_table; AgeTable _survivors_age_table;
public: public:
uint tenuring_threshold() const { return _tenuring_threshold; } uint tenuring_threshold() const { return _tenuring_threshold; }
@ -882,7 +882,7 @@ public:
return _recorded_survivor_regions; return _recorded_survivor_regions;
} }
void record_age_table(ageTable* age_table) { void record_age_table(AgeTable* age_table) {
_survivors_age_table.merge(age_table); _survivors_age_table.merge(age_table);
} }


@ -50,7 +50,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1PLABAllocator* _plab_allocator; G1PLABAllocator* _plab_allocator;
ageTable _age_table; AgeTable _age_table;
InCSetState _dest[InCSetState::Num]; InCSetState _dest[InCSetState::Num];
// Local tenuring threshold. // Local tenuring threshold.
uint _tenuring_threshold; uint _tenuring_threshold;


@ -96,7 +96,7 @@ void G1StringDedupQueue::push(uint worker_id, oop java_string) {
oop G1StringDedupQueue::pop() { oop G1StringDedupQueue::pop() {
assert(!SafepointSynchronize::is_at_safepoint(), "Must not be at safepoint"); assert(!SafepointSynchronize::is_at_safepoint(), "Must not be at safepoint");
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
// Try all queues before giving up // Try all queues before giving up
for (size_t tries = 0; tries < _queue->_nqueues; tries++) { for (size_t tries = 0; tries < _queue->_nqueues; tries++) {


@ -299,7 +299,7 @@ unsigned int G1StringDedupTable::hash_code(typeArrayOop value, bool latin1) {
void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) { void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
assert(java_lang_String::is_instance(java_string), "Must be a string"); assert(java_lang_String::is_instance(java_string), "Must be a string");
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
stat.inc_inspected(); stat.inc_inspected();


@ -250,7 +250,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
} }
// Failed to allocate without a gc. // Failed to allocate without a gc.
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall // If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and // the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is // GC allowed. When the critical section clears, a GC is
@ -260,7 +260,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
JavaThread* jthr = JavaThread::current(); JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) { if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock); MutexUnlocker mul(Heap_lock);
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
gclocker_stalled_count += 1; gclocker_stalled_count += 1;
continue; continue;
} else { } else {
@ -350,7 +350,7 @@ ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
} }
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) { HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) { if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
// Size is too big for eden, or gc is locked out. // Size is too big for eden, or gc is locked out.
return old_gen()->allocate(size); return old_gen()->allocate(size);
} }


@ -109,7 +109,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity"); assert(ref_processor() != NULL, "Sanity");
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return false; return false;
} }


@ -257,7 +257,7 @@ void PSOldGen::expand(size_t bytes) {
success = expand_to_reserved(); success = expand_to_reserved();
} }
if (success && GC_locker::is_active_and_needs_gc()) { if (success && GCLocker::is_active_and_needs_gc()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead"); log_debug(gc)("Garbage collection disabled, expanded heap instead");
} }
} }


@ -1717,7 +1717,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity"); assert(ref_processor() != NULL, "Sanity");
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return false; return false;
} }


@ -268,7 +268,7 @@ bool PSScavenge::invoke_no_policy() {
scavenge_entry.update(); scavenge_entry.update();
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return false; return false;
} }


@ -45,7 +45,7 @@ void VM_ParallelGCFailedAllocation::doit() {
GCCauseSetter gccs(heap, _gc_cause); GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_mem_allocate(_word_size); _result = heap->failed_mem_allocate(_word_size);
if (_result == NULL && GC_locker::is_active_and_needs_gc()) { if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
} }


@ -357,7 +357,7 @@ bool DefNewGeneration::expand(size_t bytes) {
// For example if the first expand fail for unknown reasons, // For example if the first expand fail for unknown reasons,
// but the second succeeds and expands the heap to its maximum // but the second succeeds and expands the heap to its maximum
// value. // value.
if (GC_locker::is_active()) { if (GCLocker::is_active()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead"); log_debug(gc)("Garbage collection disabled, expanded heap instead");
} }
@ -527,7 +527,7 @@ void DefNewGeneration::space_iterate(SpaceClosure* blk,
// The last collection bailed out, we are running out of heap space, // The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too. // so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) { HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc(); bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
// If the Heap_lock is not locked by this thread, this will be called // If the Heap_lock is not locked by this thread, this will be called
// again later with the Heap_lock held. // again later with the Heap_lock held.
@ -910,7 +910,7 @@ bool DefNewGeneration::collection_attempt_is_safe() {
void DefNewGeneration::gc_epilogue(bool full) { void DefNewGeneration::gc_epilogue(bool full) {
DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
assert(!GC_locker::is_active(), "We should not be executing here"); assert(!GCLocker::is_active(), "We should not be executing here");
// Check if the heap is approaching full after a collection has // Check if the heap is approaching full after a collection has
// been done. Generally the young generation is empty at // been done. Generally the young generation is empty at
// a minimum at the end of a collection. If it is not, then // a minimum at the end of a collection. If it is not, then


@ -47,11 +47,11 @@ class DefNewGeneration: public Generation {
protected: protected:
Generation* _old_gen; Generation* _old_gen;
uint _tenuring_threshold; // Tenuring threshold for next collection. uint _tenuring_threshold; // Tenuring threshold for next collection.
ageTable _age_table; AgeTable _age_table;
// Size of object to pretenure in words; command line provides bytes // Size of object to pretenure in words; command line provides bytes
size_t _pretenure_size_threshold_words; size_t _pretenure_size_threshold_words;
ageTable* age_table() { return &_age_table; } AgeTable* age_table() { return &_age_table; }
// Initialize state to optimistically assume no promotion failure will // Initialize state to optimistically assume no promotion failure will
// happen. // happen.


@ -34,7 +34,7 @@
/* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University. /* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */ See the LICENSE file for license information. */
ageTable::ageTable(bool global) { AgeTable::AgeTable(bool global) {
clear(); clear();
@ -61,19 +61,19 @@ ageTable::ageTable(bool global) {
} }
} }
void ageTable::clear() { void AgeTable::clear() {
for (size_t* p = sizes; p < sizes + table_size; ++p) { for (size_t* p = sizes; p < sizes + table_size; ++p) {
*p = 0; *p = 0;
} }
} }
void ageTable::merge(ageTable* subTable) { void AgeTable::merge(AgeTable* subTable) {
for (int i = 0; i < table_size; i++) { for (int i = 0; i < table_size; i++) {
sizes[i]+= subTable->sizes[i]; sizes[i]+= subTable->sizes[i];
} }
} }
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) { uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100); size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
uint result; uint result;
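For reference, the desired_survivor_size arithmetic above works out to survivor_capacity * TargetSurvivorRatio / 100, and the tenuring threshold is then the smallest age at which the accumulated survivor bytes exceed that size, capped by the maximum threshold. The sketch below restates this as standalone code; the accumulation loop is a reconstruction (the hunk is cut off after "uint result;") and the VM flags are passed as plain parameters, so treat it as an approximation rather than AgeTable itself.

// Standalone restatement of the thresholding arithmetic above. The
// desired_survivor_size line matches the hunk; the loop that follows is a
// reconstruction, so this approximates AgeTable::compute_tenuring_threshold
// rather than copying it.
#include <cstddef>

const unsigned table_size = 16;   // one bucket per object age

struct AgeTableSketch {
  size_t sizes[table_size] = {};  // words occupied by objects of each age

  unsigned compute_tenuring_threshold(size_t survivor_capacity,
                                      double target_survivor_ratio,
                                      unsigned max_tenuring_threshold) const {
    size_t desired_survivor_size =
        (size_t)(((double)survivor_capacity * target_survivor_ratio) / 100);
    size_t total = 0;
    unsigned age = 1;
    while (age < table_size) {
      total += sizes[age];                  // words that would stay in survivor space
      if (total > desired_survivor_size) {  // survivors would overflow at this age
        break;
      }
      age++;
    }
    return age < max_tenuring_threshold ? age : max_tenuring_threshold;
  }
};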


@ -38,7 +38,7 @@ class GCPolicyCounters;
// //
// Note: all sizes are in oops // Note: all sizes are in oops
class ageTable VALUE_OBJ_CLASS_SPEC { class AgeTable VALUE_OBJ_CLASS_SPEC {
friend class VMStructs; friend class VMStructs;
public: public:
@ -50,7 +50,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
// constructor. "global" indicates that this is the global age table // constructor. "global" indicates that this is the global age table
// (as opposed to gc-thread-local) // (as opposed to gc-thread-local)
ageTable(bool global = true); AgeTable(bool global = true);
// clear table // clear table
void clear(); void clear();
@ -67,7 +67,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
// Merge another age table with the current one. Used // Merge another age table with the current one. Used
// for parallel young generation gc. // for parallel young generation gc.
void merge(ageTable* subTable); void merge(AgeTable* subTable);
// calculate new tenuring threshold based on age information // calculate new tenuring threshold based on age information
uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters); uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);


@ -131,7 +131,7 @@ bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
if (!success) { if (!success) {
success = grow_to_reserved(); success = grow_to_reserved();
} }
if (success && GC_locker::is_active_and_needs_gc()) { if (success && GCLocker::is_active_and_needs_gc()) {
log_trace(gc, heap)("Garbage collection disabled, expanded heap instead"); log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
} }


@ -620,7 +620,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
return result; return result;
} }
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
if (is_tlab) { if (is_tlab) {
return NULL; // Caller will retry allocating individual object. return NULL; // Caller will retry allocating individual object.
} }
@ -647,7 +647,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
if (!jthr->in_critical()) { if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock); MutexUnlocker mul(Heap_lock);
// Wait for JNI critical section to be exited // Wait for JNI critical section to be exited
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
gclocker_stalled_count += 1; gclocker_stalled_count += 1;
continue; continue;
} else { } else {
@ -728,7 +728,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
HeapWord* result = NULL; HeapWord* result = NULL;
assert(size != 0, "Precondition violated"); assert(size != 0, "Precondition violated");
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
// GC locker is active; instead of a collection we will attempt // GC locker is active; instead of a collection we will attempt
// to expand the heap, if there's room for expansion. // to expand the heap, if there's room for expansion.
if (!gch->is_maximal_no_gc()) { if (!gch->is_maximal_no_gc()) {
@ -815,8 +815,8 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
return result; return result;
} }
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
// If the GC_locker is active, just expand and allocate. // If the GCLocker is active, just expand and allocate.
// If that does not succeed, wait if this thread is not // If that does not succeed, wait if this thread is not
// in a critical section itself. // in a critical section itself.
result = result =
@ -828,7 +828,7 @@ MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
JavaThread* jthr = JavaThread::current(); JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) { if (!jthr->in_critical()) {
// Wait for JNI critical section to be exited // Wait for JNI critical section to be exited
GC_locker::stall_until_clear(); GCLocker::stall_until_clear();
// The GC invoked by the last thread leaving the critical // The GC invoked by the last thread leaving the critical
// section will be a young collection and a full collection // section will be a young collection and a full collection
// is (currently) needed for unloading classes so continue // is (currently) needed for unloading classes so continue
@ -887,7 +887,7 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t young_capacity = gch->young_gen()->capacity_before_gc(); size_t young_capacity = gch->young_gen()->capacity_before_gc();
return (word_size > heap_word_size(young_capacity)) return (word_size > heap_word_size(young_capacity))
|| GC_locker::is_active_and_needs_gc() || GCLocker::is_active_and_needs_gc()
|| gch->incremental_collection_failed(); || gch->incremental_collection_failed();
} }


@ -30,17 +30,17 @@
#include "runtime/atomic.inline.hpp" #include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
volatile jint GC_locker::_jni_lock_count = 0; volatile jint GCLocker::_jni_lock_count = 0;
volatile bool GC_locker::_needs_gc = false; volatile bool GCLocker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false; volatile bool GCLocker::_doing_gc = false;
#ifdef ASSERT #ifdef ASSERT
volatile jint GC_locker::_debug_jni_lock_count = 0; volatile jint GCLocker::_debug_jni_lock_count = 0;
#endif #endif
#ifdef ASSERT #ifdef ASSERT
void GC_locker::verify_critical_count() { void GCLocker::verify_critical_count() {
if (SafepointSynchronize::is_at_safepoint()) { if (SafepointSynchronize::is_at_safepoint()) {
assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree"); assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
int count = 0; int count = 0;
@ -63,18 +63,18 @@ void GC_locker::verify_critical_count() {
} }
// In debug mode track the locking state at all times // In debug mode track the locking state at all times
void GC_locker::increment_debug_jni_lock_count() { void GCLocker::increment_debug_jni_lock_count() {
assert(_debug_jni_lock_count >= 0, "bad value"); assert(_debug_jni_lock_count >= 0, "bad value");
Atomic::inc(&_debug_jni_lock_count); Atomic::inc(&_debug_jni_lock_count);
} }
void GC_locker::decrement_debug_jni_lock_count() { void GCLocker::decrement_debug_jni_lock_count() {
assert(_debug_jni_lock_count > 0, "bad value"); assert(_debug_jni_lock_count > 0, "bad value");
Atomic::dec(&_debug_jni_lock_count); Atomic::dec(&_debug_jni_lock_count);
} }
#endif #endif
void GC_locker::log_debug_jni(const char* msg) { void GCLocker::log_debug_jni(const char* msg) {
LogHandle(gc, jni) log; LogHandle(gc, jni) log;
if (log.is_debug()) { if (log.is_debug()) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8 ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
@ -82,7 +82,7 @@ void GC_locker::log_debug_jni(const char* msg) {
} }
} }
bool GC_locker::check_active_before_gc() { bool GCLocker::check_active_before_gc() {
assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
if (is_active() && !_needs_gc) { if (is_active() && !_needs_gc) {
verify_critical_count(); verify_critical_count();
@ -92,7 +92,7 @@ bool GC_locker::check_active_before_gc() {
return is_active(); return is_active();
} }
void GC_locker::stall_until_clear() { void GCLocker::stall_until_clear() {
assert(!JavaThread::current()->in_critical(), "Would deadlock"); assert(!JavaThread::current()->in_critical(), "Would deadlock");
MutexLocker ml(JNICritical_lock); MutexLocker ml(JNICritical_lock);
@ -106,7 +106,7 @@ void GC_locker::stall_until_clear() {
} }
} }
void GC_locker::jni_lock(JavaThread* thread) { void GCLocker::jni_lock(JavaThread* thread) {
assert(!thread->in_critical(), "shouldn't currently be in a critical region"); assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MutexLocker mu(JNICritical_lock); MutexLocker mu(JNICritical_lock);
// Block entering threads if we know at least one thread is in a // Block entering threads if we know at least one thread is in a
@ -122,7 +122,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
increment_debug_jni_lock_count(); increment_debug_jni_lock_count();
} }
void GC_locker::jni_unlock(JavaThread* thread) { void GCLocker::jni_unlock(JavaThread* thread) {
assert(thread->in_last_critical(), "should be exiting critical region"); assert(thread->in_last_critical(), "should be exiting critical region");
MutexLocker mu(JNICritical_lock); MutexLocker mu(JNICritical_lock);
_jni_lock_count--; _jni_lock_count--;
@ -143,49 +143,49 @@ void GC_locker::jni_unlock(JavaThread* thread) {
} }
} }
// Implementation of No_GC_Verifier // Implementation of NoGCVerifier
#ifdef ASSERT #ifdef ASSERT
No_GC_Verifier::No_GC_Verifier(bool verifygc) { NoGCVerifier::NoGCVerifier(bool verifygc) {
_verifygc = verifygc; _verifygc = verifygc;
if (_verifygc) { if (_verifygc) {
CollectedHeap* h = Universe::heap(); CollectedHeap* h = Universe::heap();
assert(!h->is_gc_active(), "GC active during No_GC_Verifier"); assert(!h->is_gc_active(), "GC active during NoGCVerifier");
_old_invocations = h->total_collections(); _old_invocations = h->total_collections();
} }
} }
No_GC_Verifier::~No_GC_Verifier() { NoGCVerifier::~NoGCVerifier() {
if (_verifygc) { if (_verifygc) {
CollectedHeap* h = Universe::heap(); CollectedHeap* h = Universe::heap();
assert(!h->is_gc_active(), "GC active during No_GC_Verifier"); assert(!h->is_gc_active(), "GC active during NoGCVerifier");
if (_old_invocations != h->total_collections()) { if (_old_invocations != h->total_collections()) {
fatal("collection in a No_GC_Verifier secured function"); fatal("collection in a NoGCVerifier secured function");
} }
} }
} }
Pause_No_GC_Verifier::Pause_No_GC_Verifier(No_GC_Verifier * ngcv) { PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
_ngcv = ngcv; _ngcv = ngcv;
if (_ngcv->_verifygc) { if (_ngcv->_verifygc) {
// if we were verifying, then make sure that nothing is // if we were verifying, then make sure that nothing is
// wrong before we "pause" verification // wrong before we "pause" verification
CollectedHeap* h = Universe::heap(); CollectedHeap* h = Universe::heap();
assert(!h->is_gc_active(), "GC active during No_GC_Verifier"); assert(!h->is_gc_active(), "GC active during NoGCVerifier");
if (_ngcv->_old_invocations != h->total_collections()) { if (_ngcv->_old_invocations != h->total_collections()) {
fatal("collection in a No_GC_Verifier secured function"); fatal("collection in a NoGCVerifier secured function");
} }
} }
} }
Pause_No_GC_Verifier::~Pause_No_GC_Verifier() { PauseNoGCVerifier::~PauseNoGCVerifier() {
if (_ngcv->_verifygc) { if (_ngcv->_verifygc) {
// if we were verifying before, then reenable verification // if we were verifying before, then reenable verification
CollectedHeap* h = Universe::heap(); CollectedHeap* h = Universe::heap();
assert(!h->is_gc_active(), "GC active during No_GC_Verifier"); assert(!h->is_gc_active(), "GC active during NoGCVerifier");
_ngcv->_old_invocations = h->total_collections(); _ngcv->_old_invocations = h->total_collections();
} }
} }
@ -201,16 +201,16 @@ Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
// 6) reaching a safepoint // 6) reaching a safepoint
// 7) running too long // 7) running too long
// Nor may any method it calls. // Nor may any method it calls.
JRT_Leaf_Verifier::JRT_Leaf_Verifier() JRTLeafVerifier::JRTLeafVerifier()
: No_Safepoint_Verifier(true, JRT_Leaf_Verifier::should_verify_GC()) : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
{ {
} }
JRT_Leaf_Verifier::~JRT_Leaf_Verifier() JRTLeafVerifier::~JRTLeafVerifier()
{ {
} }
bool JRT_Leaf_Verifier::should_verify_GC() { bool JRTLeafVerifier::should_verify_GC() {
switch (JavaThread::current()->thread_state()) { switch (JavaThread::current()->thread_state()) {
case _thread_in_Java: case _thread_in_Java:
// is in a leaf routine, there must be no safepoint. // is in a leaf routine, there must be no safepoint.
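stall_until_clear() and jni_unlock() in the hunks above implement a classic wait/notify handshake on the JNICritical_lock monitor: allocating threads block until the critical count drains and the pending GC has been dealt with, and the last thread leaving the critical region wakes them. Below is a standalone approximation of that handshake with std::mutex/std::condition_variable standing in for the VM monitor; the class and member names are invented and the GC itself is elided.

// Standalone sketch of the stall/notify handshake behind stall_until_clear()
// and jni_unlock(). Not HotSpot code: std-library primitives replace the
// JNICritical_lock monitor, and running the pending GC is only a comment.
#include <condition_variable>
#include <mutex>

class CriticalRegionGate {
  std::mutex _lock;
  std::condition_variable _cleared;
  int  _critical_count = 0;   // threads currently inside a critical region
  bool _needs_gc = false;     // a GC is pending until the last thread leaves

public:
  void enter_critical() {
    std::lock_guard<std::mutex> g(_lock);
    _critical_count++;
  }

  void exit_critical() {
    std::lock_guard<std::mutex> g(_lock);
    if (--_critical_count == 0 && _needs_gc) {
      _needs_gc = false;      // the real code would run the pending GC here
      _cleared.notify_all();  // wake every thread stalled below
    }
  }

  // Called by an allocating thread that found the gate active: block until the
  // pending GC has been taken care of, then let the caller retry its allocation.
  void stall_until_clear() {
    std::unique_lock<std::mutex> g(_lock);
    _needs_gc = _critical_count > 0;
    _cleared.wait(g, [this] { return !_needs_gc; });
  }
};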


@ -33,12 +33,12 @@
// The direct lock/unlock calls do not force a collection if an unlock // The direct lock/unlock calls do not force a collection if an unlock
// decrements the count to zero. Avoid calling these if at all possible. // decrements the count to zero. Avoid calling these if at all possible.
class GC_locker: public AllStatic { class GCLocker: public AllStatic {
private: private:
// The _jni_lock_count keeps track of the number of threads that are // The _jni_lock_count keeps track of the number of threads that are
// currently in a critical region. It's only kept up to date when // currently in a critical region. It's only kept up to date when
// _needs_gc is true. The current value is computed during // _needs_gc is true. The current value is computed during
// safepointing and decremented during the slow path of GC_locker // safepointing and decremented during the slow path of GCLocker
// unlocking. // unlocking.
static volatile jint _jni_lock_count; // number of jni active instances. static volatile jint _jni_lock_count; // number of jni active instances.
static volatile bool _needs_gc; // heap is filling, we need a GC static volatile bool _needs_gc; // heap is filling, we need a GC
@ -103,7 +103,7 @@ class GC_locker: public AllStatic {
static void stall_until_clear(); static void stall_until_clear();
// The following two methods are used for JNI critical regions. // The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GC_locker // If we find that we failed to perform a GC because the GCLocker
// was active, arrange for one as soon as possible by allowing // was active, arrange for one as soon as possible by allowing
// all threads in critical regions to complete, but not allowing // all threads in critical regions to complete, but not allowing
// other critical regions to be entered. The reasons for that are: // other critical regions to be entered. The reasons for that are:
@ -126,7 +126,7 @@ class GC_locker: public AllStatic {
// _needs_gc is initially false and every java thread will go // _needs_gc is initially false and every java thread will go
// through the fast path, which simply increments or decrements the // through the fast path, which simply increments or decrements the
// current thread's critical count. When GC happens at a safepoint, // current thread's critical count. When GC happens at a safepoint,
// GC_locker::is_active() is checked. Since there is no safepoint in // GCLocker::is_active() is checked. Since there is no safepoint in
// the fast path of lock_critical() and unlock_critical(), there is // the fast path of lock_critical() and unlock_critical(), there is
// no race condition between the fast path and GC. After _needs_gc // no race condition between the fast path and GC. After _needs_gc
// is set at a safepoint, every thread will go through the slow path // is set at a safepoint, every thread will go through the slow path
@ -142,14 +142,14 @@ class GC_locker: public AllStatic {
}; };
// A No_GC_Verifier object can be placed in methods where one assumes that // A NoGCVerifier object can be placed in methods where one assumes that
// no garbage collection will occur. The destructor will verify this property // no garbage collection will occur. The destructor will verify this property
// unless the constructor is called with argument false (not verifygc). // unless the constructor is called with argument false (not verifygc).
// //
// The check will only be done in debug mode and if verifygc true. // The check will only be done in debug mode and if verifygc true.
class No_GC_Verifier: public StackObj { class NoGCVerifier: public StackObj {
friend class Pause_No_GC_Verifier; friend class PauseNoGCVerifier;
protected: protected:
bool _verifygc; bool _verifygc;
@ -157,51 +157,51 @@ class No_GC_Verifier: public StackObj {
public: public:
#ifdef ASSERT #ifdef ASSERT
No_GC_Verifier(bool verifygc = true); NoGCVerifier(bool verifygc = true);
~No_GC_Verifier(); ~NoGCVerifier();
#else #else
No_GC_Verifier(bool verifygc = true) {} NoGCVerifier(bool verifygc = true) {}
~No_GC_Verifier() {} ~NoGCVerifier() {}
#endif #endif
}; };
// A Pause_No_GC_Verifier is used to temporarily pause the behavior // A PauseNoGCVerifier is used to temporarily pause the behavior
// of a No_GC_Verifier object. If we are not in debug mode or if the // of a NoGCVerifier object. If we are not in debug mode or if the
// No_GC_Verifier object has a _verifygc value of false, then there // NoGCVerifier object has a _verifygc value of false, then there
// is nothing to do. // is nothing to do.
class Pause_No_GC_Verifier: public StackObj { class PauseNoGCVerifier: public StackObj {
private: private:
No_GC_Verifier * _ngcv; NoGCVerifier * _ngcv;
public: public:
#ifdef ASSERT #ifdef ASSERT
Pause_No_GC_Verifier(No_GC_Verifier * ngcv); PauseNoGCVerifier(NoGCVerifier * ngcv);
~Pause_No_GC_Verifier(); ~PauseNoGCVerifier();
#else #else
Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {} PauseNoGCVerifier(NoGCVerifier * ngcv) {}
~Pause_No_GC_Verifier() {} ~PauseNoGCVerifier() {}
#endif #endif
}; };
// A No_Safepoint_Verifier object will throw an assertion failure if // A NoSafepointVerifier object will throw an assertion failure if
// the current thread passes a possible safepoint while this object is // the current thread passes a possible safepoint while this object is
// instantiated. A safepoint, will either be: an oop allocation, blocking // instantiated. A safepoint, will either be: an oop allocation, blocking
// on a Mutex or JavaLock, or executing a VM operation. // on a Mutex or JavaLock, or executing a VM operation.
// //
// If StrictSafepointChecks is turned off, it degrades into a No_GC_Verifier // If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
// //
class No_Safepoint_Verifier : public No_GC_Verifier { class NoSafepointVerifier : public NoGCVerifier {
friend class Pause_No_Safepoint_Verifier; friend class PauseNoSafepointVerifier;
private: private:
bool _activated; bool _activated;
Thread *_thread; Thread *_thread;
public: public:
#ifdef ASSERT #ifdef ASSERT
No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) : NoSafepointVerifier(bool activated = true, bool verifygc = true ) :
No_GC_Verifier(verifygc), NoGCVerifier(verifygc),
_activated(activated) { _activated(activated) {
_thread = Thread::current(); _thread = Thread::current();
if (_activated) { if (_activated) {
@ -210,33 +210,33 @@ class No_Safepoint_Verifier : public No_GC_Verifier {
} }
} }
~No_Safepoint_Verifier() { ~NoSafepointVerifier() {
if (_activated) { if (_activated) {
_thread->_allow_allocation_count--; _thread->_allow_allocation_count--;
_thread->_allow_safepoint_count--; _thread->_allow_safepoint_count--;
} }
} }
#else #else
No_Safepoint_Verifier(bool activated = true, bool verifygc = true) : No_GC_Verifier(verifygc){} NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc){}
~No_Safepoint_Verifier() {} ~NoSafepointVerifier() {}
#endif #endif
}; };
// A Pause_No_Safepoint_Verifier is used to temporarily pause the // A PauseNoSafepointVerifier is used to temporarily pause the
// behavior of a No_Safepoint_Verifier object. If we are not in debug // behavior of a NoSafepointVerifier object. If we are not in debug
// mode then there is nothing to do. If the No_Safepoint_Verifier // mode then there is nothing to do. If the NoSafepointVerifier
// object has an _activated value of false, then there is nothing to // object has an _activated value of false, then there is nothing to
// do for safepoint and allocation checking, but there may still be // do for safepoint and allocation checking, but there may still be
// something to do for the underlying No_GC_Verifier object. // something to do for the underlying NoGCVerifier object.
class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier { class PauseNoSafepointVerifier : public PauseNoGCVerifier {
private: private:
No_Safepoint_Verifier * _nsv; NoSafepointVerifier * _nsv;
public: public:
#ifdef ASSERT #ifdef ASSERT
Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv) PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
: Pause_No_GC_Verifier(nsv) { : PauseNoGCVerifier(nsv) {
_nsv = nsv; _nsv = nsv;
if (_nsv->_activated) { if (_nsv->_activated) {
@ -245,16 +245,16 @@ class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier {
} }
} }
~Pause_No_Safepoint_Verifier() { ~PauseNoSafepointVerifier() {
if (_nsv->_activated) { if (_nsv->_activated) {
_nsv->_thread->_allow_allocation_count++; _nsv->_thread->_allow_allocation_count++;
_nsv->_thread->_allow_safepoint_count++; _nsv->_thread->_allow_safepoint_count++;
} }
} }
#else #else
Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv) PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
: Pause_No_GC_Verifier(nsv) {} : PauseNoGCVerifier(nsv) {}
~Pause_No_Safepoint_Verifier() {} ~PauseNoSafepointVerifier() {}
#endif #endif
}; };
@ -287,19 +287,19 @@ class SkipGCALot : public StackObj {
// _thread_in_native mode. In _thread_in_native, it is ok // _thread_in_native mode. In _thread_in_native, it is ok
// for another thread to trigger GC. The rest of the JRT_LEAF // for another thread to trigger GC. The rest of the JRT_LEAF
// rules apply. // rules apply.
class JRT_Leaf_Verifier : public No_Safepoint_Verifier { class JRTLeafVerifier : public NoSafepointVerifier {
static bool should_verify_GC(); static bool should_verify_GC();
public: public:
#ifdef ASSERT #ifdef ASSERT
JRT_Leaf_Verifier(); JRTLeafVerifier();
~JRT_Leaf_Verifier(); ~JRTLeafVerifier();
#else #else
JRT_Leaf_Verifier() {} JRTLeafVerifier() {}
~JRT_Leaf_Verifier() {} ~JRTLeafVerifier() {}
#endif #endif
}; };
// A No_Alloc_Verifier object can be placed in methods where one assumes that // A NoAllocVerifier object can be placed in methods where one assumes that
// no allocation will occur. The destructor will verify this property // no allocation will occur. The destructor will verify this property
// unless the constructor is called with argument false (not activated). // unless the constructor is called with argument false (not activated).
// //
@ -307,23 +307,23 @@ class JRT_Leaf_Verifier : public No_Safepoint_Verifier {
// Note: this only makes sense at safepoints (otherwise, other threads may // Note: this only makes sense at safepoints (otherwise, other threads may
// allocate concurrently.) // allocate concurrently.)
class No_Alloc_Verifier : public StackObj { class NoAllocVerifier : public StackObj {
private: private:
bool _activated; bool _activated;
public: public:
#ifdef ASSERT #ifdef ASSERT
No_Alloc_Verifier(bool activated = true) { NoAllocVerifier(bool activated = true) {
_activated = activated; _activated = activated;
if (_activated) Thread::current()->_allow_allocation_count++; if (_activated) Thread::current()->_allow_allocation_count++;
} }
~No_Alloc_Verifier() { ~NoAllocVerifier() {
if (_activated) Thread::current()->_allow_allocation_count--; if (_activated) Thread::current()->_allow_allocation_count--;
} }
#else #else
No_Alloc_Verifier(bool activated = true) {} NoAllocVerifier(bool activated = true) {}
~No_Alloc_Verifier() {} ~NoAllocVerifier() {}
#endif #endif
}; };
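The verifier classes renamed above are small RAII guards: NoGCVerifier samples the heap's collection count in its constructor and its destructor asserts the count is unchanged, while the Pause variants temporarily suspend that check. A self-contained sketch of the same discipline is below; HeapStub is invented and stands in for Universe::heap(), so this is an illustration of the pattern, not the HotSpot class.

// Standalone sketch of the RAII discipline behind NoGCVerifier: sample a
// collection counter on entry and assert it is unchanged on exit.
#include <cassert>

struct HeapStub {
  unsigned collections = 0;
  unsigned total_collections() const { return collections; }
};

class NoGCVerifierSketch {
  HeapStub& _heap;
  unsigned  _old_invocations;
public:
  explicit NoGCVerifierSketch(HeapStub& heap)
      : _heap(heap), _old_invocations(heap.total_collections()) {}
  ~NoGCVerifierSketch() {
    // Fires if a collection happened while this guard was on the stack.
    assert(_heap.total_collections() == _old_invocations &&
           "collection in a NoGCVerifier secured function");
  }
};

// Usage: place one on the stack in a scope that must not trigger a GC, e.g.
//   void no_gc_region(HeapStub& heap) {
//     NoGCVerifierSketch verify_no_gc(heap);
//     // ... work that must not trigger a collection ...
//   }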


@ -27,7 +27,7 @@
#include "gc/shared/gcLocker.hpp" #include "gc/shared/gcLocker.hpp"
inline void GC_locker::lock_critical(JavaThread* thread) { inline void GCLocker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) { if (!thread->in_critical()) {
if (needs_gc()) { if (needs_gc()) {
// jni_lock call calls enter_critical under the lock so that the // jni_lock call calls enter_critical under the lock so that the
@@ -40,7 +40,7 @@ inline void GC_locker::lock_critical(JavaThread* thread) {
thread->enter_critical(); thread->enter_critical();
} }
inline void GC_locker::unlock_critical(JavaThread* thread) { inline void GCLocker::unlock_critical(JavaThread* thread) {
if (thread->in_last_critical()) { if (thread->in_last_critical()) {
if (needs_gc()) { if (needs_gc()) {
// jni_unlock call calls exit_critical under the lock so that // jni_unlock call calls exit_critical under the lock so that

View file

@@ -409,7 +409,7 @@ void GenCollectedHeap::do_collection(bool full,
"the requesting thread should have the Heap_lock"); "the requesting thread should have the Heap_lock");
guarantee(!is_gc_active(), "collection is not reentrant"); guarantee(!is_gc_active(), "collection is not reentrant");
if (GC_locker::check_active_before_gc()) { if (GCLocker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation) return; // GC is disabled (e.g. JNI GetXXXCritical operation)
} }
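This is the standard bail-out at the top of a collection; the same contract in a hypothetical collector (only GCLocker comes from this patch):

void ExampleCollector::collect_if_unlocked() {
  if (GCLocker::check_active_before_gc()) {
    // Some thread is inside a JNI critical region. The call also records that a
    // GC is pending, so the last thread leaving the region will trigger one.
    return;
  }
  // ... safe to run the collection ...
}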

View file

@@ -208,7 +208,7 @@ HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
return top; return top;
} }
void Filtering_DCTOC::walk_mem_region(MemRegion mr, void FilteringDCTOC::walk_mem_region(MemRegion mr,
HeapWord* bottom, HeapWord* bottom,
HeapWord* top) { HeapWord* top) {
// Note that this assumption won't hold if we have a concurrent // Note that this assumption won't hold if we have a concurrent

View file

@@ -676,7 +676,7 @@ class ContiguousSpace: public CompactibleSpace {
// A dirty card to oop closure that does filtering. // A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary. // It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure { class FilteringDCTOC : public DirtyCardToOopClosure {
protected: protected:
// Override. // Override.
void walk_mem_region(MemRegion mr, void walk_mem_region(MemRegion mr,
@@ -697,7 +697,7 @@ protected:
FilteringClosure* cl) = 0; FilteringClosure* cl) = 0;
public: public:
Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl, FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision, CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) : HeapWord* boundary) :
DirtyCardToOopClosure(sp, cl, precision, boundary) {} DirtyCardToOopClosure(sp, cl, precision, boundary) {}
@@ -713,7 +713,7 @@ public:
// 2. That the space is really made up of objects and not just // 2. That the space is really made up of objects and not just
// blocks. // blocks.
class ContiguousSpaceDCTOC : public Filtering_DCTOC { class ContiguousSpaceDCTOC : public FilteringDCTOC {
protected: protected:
// Overrides. // Overrides.
HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
@@ -729,7 +729,7 @@ public:
ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl, ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision, CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) : HeapWord* boundary) :
Filtering_DCTOC(sp, cl, precision, boundary) FilteringDCTOC(sp, cl, precision, boundary)
{} {}
}; };

View file

@@ -49,11 +49,11 @@ class ParScanWithBarrierClosure;
class ParScanWithoutBarrierClosure; class ParScanWithoutBarrierClosure;
// CMS // CMS
class MarkRefsIntoAndScanClosure; class MarkRefsIntoAndScanClosure;
class Par_MarkRefsIntoAndScanClosure; class ParMarkRefsIntoAndScanClosure;
class PushAndMarkClosure; class PushAndMarkClosure;
class Par_PushAndMarkClosure; class ParPushAndMarkClosure;
class PushOrMarkClosure; class PushOrMarkClosure;
class Par_PushOrMarkClosure; class ParPushOrMarkClosure;
class CMSKeepAliveClosure; class CMSKeepAliveClosure;
class CMSInnerParMarkAndPushClosure; class CMSInnerParMarkAndPushClosure;
// Misc // Misc
@@ -95,11 +95,11 @@ class NoHeaderExtendedOopClosure;
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f) \ #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f) \
f(MarkRefsIntoAndScanClosure,_nv) \ f(MarkRefsIntoAndScanClosure,_nv) \
f(Par_MarkRefsIntoAndScanClosure,_nv) \ f(ParMarkRefsIntoAndScanClosure,_nv) \
f(PushAndMarkClosure,_nv) \ f(PushAndMarkClosure,_nv) \
f(Par_PushAndMarkClosure,_nv) \ f(ParPushAndMarkClosure,_nv) \
f(PushOrMarkClosure,_nv) \ f(PushOrMarkClosure,_nv) \
f(Par_PushOrMarkClosure,_nv) \ f(ParPushOrMarkClosure,_nv) \
f(CMSKeepAliveClosure,_nv) \ f(CMSKeepAliveClosure,_nv) \
f(CMSInnerParMarkAndPushClosure,_nv) f(CMSInnerParMarkAndPushClosure,_nv)
#endif #endif
@@ -136,8 +136,8 @@ class NoHeaderExtendedOopClosure;
#define SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) \ #define SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) \
f(MarkRefsIntoAndScanClosure,_nv) \ f(MarkRefsIntoAndScanClosure,_nv) \
f(PushAndMarkClosure,_nv) \ f(PushAndMarkClosure,_nv) \
f(Par_MarkRefsIntoAndScanClosure,_nv) \ f(ParMarkRefsIntoAndScanClosure,_nv) \
f(Par_PushAndMarkClosure,_nv) f(ParPushAndMarkClosure,_nv)
#define ALL_PAR_OOP_ITERATE_CLOSURES(f) \ #define ALL_PAR_OOP_ITERATE_CLOSURES(f) \
f(ExtendedOopClosure,_v) \ f(ExtendedOopClosure,_v) \

View file

@@ -84,10 +84,10 @@ bool VM_GC_Operation::skip_operation() const {
if (_full && skip) { if (_full && skip) {
skip = (_full_gc_count_before != Universe::heap()->total_full_collections()); skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
} }
if (!skip && GC_locker::is_active_and_needs_gc()) { if (!skip && GCLocker::is_active_and_needs_gc()) {
skip = Universe::heap()->is_maximal_no_gc(); skip = Universe::heap()->is_maximal_no_gc();
assert(!(skip && (_gc_cause == GCCause::_gc_locker)), assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
"GC_locker cannot be active when initiating GC"); "GCLocker cannot be active when initiating GC");
} }
return skip; return skip;
} }
@@ -136,7 +136,7 @@ bool VM_GC_HeapInspection::skip_operation() const {
} }
bool VM_GC_HeapInspection::collect() { bool VM_GC_HeapInspection::collect() {
if (GC_locker::is_active()) { if (GCLocker::is_active()) {
return false; return false;
} }
Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
@@ -146,7 +146,7 @@ bool VM_GC_HeapInspection::collect() {
void VM_GC_HeapInspection::doit() { void VM_GC_HeapInspection::doit() {
HandleMark hm; HandleMark hm;
Universe::heap()->ensure_parsability(false); // must happen, even if collection does Universe::heap()->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GC_locker) // not happen (e.g. due to GCLocker)
// or _full_gc being false // or _full_gc being false
if (_full_gc) { if (_full_gc) {
if (!collect()) { if (!collect()) {
@@ -177,7 +177,7 @@ void VM_GenCollectForAllocation::doit() {
_result = gch->satisfy_failed_allocation(_word_size, _tlab); _result = gch->satisfy_failed_allocation(_word_size, _tlab);
assert(gch->is_in_reserved_or_null(_result), "result not in heap"); assert(gch->is_in_reserved_or_null(_result), "result not in heap");
if (_result == NULL && GC_locker::is_active_and_needs_gc()) { if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
} }
@@ -289,7 +289,7 @@ void VM_CollectForMetadataAllocation::doit() {
log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size); log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);
if (GC_locker::is_active_and_needs_gc()) { if (GCLocker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
} }

View file

@@ -340,7 +340,7 @@ void Rewriter::scan_method(Method* method, bool reverse, bool* invokespecial_err
// We cannot tolerate a GC in this block, because we've // We cannot tolerate a GC in this block, because we've
// cached the bytecodes in 'code_base'. If the Method* // cached the bytecodes in 'code_base'. If the Method*
// moves, the bytecodes will also move. // moves, the bytecodes will also move.
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
Bytecodes::Code c; Bytecodes::Code c;
// Bytecodes and their length // Bytecodes and their length

View file

@@ -49,7 +49,7 @@ ConstMethod::ConstMethod(int byte_code_size,
MethodType method_type, MethodType method_type,
int size) { int size) {
No_Safepoint_Verifier no_safepoint; NoSafepointVerifier no_safepoint;
init_fingerprint(); init_fingerprint();
set_constants(NULL); set_constants(NULL);
set_stackmap_data(NULL); set_stackmap_data(NULL);

View file

@@ -2624,7 +2624,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
bool InstanceKlass::add_member_name(Handle mem_name) { bool InstanceKlass::add_member_name(Handle mem_name) {
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name); jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
MutexLocker ml(MemberNameTable_lock); MutexLocker ml(MemberNameTable_lock);
DEBUG_ONLY(No_Safepoint_Verifier nsv); DEBUG_ONLY(NoSafepointVerifier nsv);
// Check if method has been redefined while taking out MemberNameTable_lock, if so // Check if method has been redefined while taking out MemberNameTable_lock, if so
// return false. We cannot cache obsolete methods. They will crash when the function // return false. We cannot cache obsolete methods. They will crash when the function

View file

@@ -59,7 +59,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
Array<Method*>* methods, AccessFlags class_flags, Array<Method*>* methods, AccessFlags class_flags,
Handle classloader, Symbol* classname, Array<Klass*>* local_interfaces, Handle classloader, Symbol* classname, Array<Klass*>* local_interfaces,
TRAPS) { TRAPS) {
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
// set up default result values // set up default result values
int vtable_length = 0; int vtable_length = 0;

View file

@@ -77,7 +77,7 @@ Method* Method::allocate(ClassLoaderData* loader_data,
} }
Method::Method(ConstMethod* xconst, AccessFlags access_flags) { Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
No_Safepoint_Verifier no_safepoint; NoSafepointVerifier no_safepoint;
set_constMethod(xconst); set_constMethod(xconst);
set_access_flags(access_flags); set_access_flags(access_flags);
#ifdef CC_INTERP #ifdef CC_INTERP
@@ -998,7 +998,7 @@ void Method::restore_unshareable_info(TRAPS) {
// or adapter that it points to is still live and valid. // or adapter that it points to is still live and valid.
// This function must not hit a safepoint! // This function must not hit a safepoint!
address Method::verified_code_entry() { address Method::verified_code_entry() {
debug_only(No_Safepoint_Verifier nsv;) debug_only(NoSafepointVerifier nsv;)
assert(_from_compiled_entry != NULL, "must be set"); assert(_from_compiled_entry != NULL, "must be set");
return _from_compiled_entry; return _from_compiled_entry;
} }
@@ -1548,7 +1548,7 @@ void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idn
int length = methods->length(); int length = methods->length();
if (length > 1) { if (length > 1) {
{ {
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent); QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
} }
// Reset method ordering // Reset method ordering

View file

@@ -1140,7 +1140,7 @@ MethodData::MethodData(const methodHandle& method, int size, TRAPS)
} }
void MethodData::initialize() { void MethodData::initialize() {
No_Safepoint_Verifier no_safepoint; // init function atomic wrt GC NoSafepointVerifier no_safepoint; // init function atomic wrt GC
ResourceMark rm; ResourceMark rm;
init(); init();

View file

@@ -1383,7 +1383,7 @@ address OptoRuntime::handle_exception_C(JavaThread* thread) {
// However, there needs to be a safepoint check in the middle! So compiled // However, there needs to be a safepoint check in the middle! So compiled
// safepoints are completely watertight. // safepoints are completely watertight.
// //
// Thus, it cannot be a leaf since it contains the No_GC_Verifier. // Thus, it cannot be a leaf since it contains the NoGCVerifier.
// //
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE* // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
// //

View file

@@ -204,7 +204,7 @@ intptr_t jfieldIDWorkaround::encode_klass_hash(Klass* k, intptr_t offset) {
field_klass = super_klass; // super contains the field also field_klass = super_klass; // super contains the field also
super_klass = field_klass->super(); super_klass = field_klass->super();
} }
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
uintptr_t klass_hash = field_klass->identity_hash(); uintptr_t klass_hash = field_klass->identity_hash();
return ((klass_hash & klass_mask) << klass_shift) | checked_mask_in_place; return ((klass_hash & klass_mask) << klass_shift) | checked_mask_in_place;
} else { } else {
@@ -224,7 +224,7 @@ bool jfieldIDWorkaround::klass_hash_ok(Klass* k, jfieldID id) {
uintptr_t as_uint = (uintptr_t) id; uintptr_t as_uint = (uintptr_t) id;
intptr_t klass_hash = (as_uint >> klass_shift) & klass_mask; intptr_t klass_hash = (as_uint >> klass_shift) & klass_mask;
do { do {
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
// Could use a non-blocking query for identity_hash here... // Could use a non-blocking query for identity_hash here...
if ((k->identity_hash() & klass_mask) == klass_hash) if ((k->identity_hash() & klass_mask) == klass_hash)
return true; return true;
@@ -1124,7 +1124,7 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
selected_method = m; selected_method = m;
} else if (!m->has_itable_index()) { } else if (!m->has_itable_index()) {
// non-interface call -- for that little speed boost, don't handlize // non-interface call -- for that little speed boost, don't handlize
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
// jni_GetMethodID makes sure class is linked and initialized // jni_GetMethodID makes sure class is linked and initialized
// so m should have a valid vtable index. // so m should have a valid vtable index.
assert(m->valid_vtable_index(), "no valid vtable index"); assert(m->valid_vtable_index(), "no valid vtable index");
@@ -3157,7 +3157,7 @@ JNI_END
JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy)) JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
JNIWrapper("GetPrimitiveArrayCritical"); JNIWrapper("GetPrimitiveArrayCritical");
HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy); HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy);
GC_locker::lock_critical(thread); GCLocker::lock_critical(thread);
if (isCopy != NULL) { if (isCopy != NULL) {
*isCopy = JNI_FALSE; *isCopy = JNI_FALSE;
} }
@@ -3179,7 +3179,7 @@ JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, voi
JNIWrapper("ReleasePrimitiveArrayCritical"); JNIWrapper("ReleasePrimitiveArrayCritical");
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode); HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
// The array, carray and mode arguments are ignored // The array, carray and mode arguments are ignored
GC_locker::unlock_critical(thread); GCLocker::unlock_critical(thread);
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN(); HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
JNI_END JNI_END
@@ -3187,7 +3187,7 @@ JNI_END
JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy)) JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
JNIWrapper("GetStringCritical"); JNIWrapper("GetStringCritical");
HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy); HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
GC_locker::lock_critical(thread); GCLocker::lock_critical(thread);
oop s = JNIHandles::resolve_non_null(string); oop s = JNIHandles::resolve_non_null(string);
typeArrayOop s_value = java_lang_String::value(s); typeArrayOop s_value = java_lang_String::value(s);
bool is_latin1 = java_lang_String::is_latin1(s); bool is_latin1 = java_lang_String::is_latin1(s);
@@ -3225,7 +3225,7 @@ JNI_ENTRY(void, jni_ReleaseStringCritical(JNIEnv *env, jstring str, const jchar
// This assumes that ReleaseStringCritical bookends GetStringCritical. // This assumes that ReleaseStringCritical bookends GetStringCritical.
FREE_C_HEAP_ARRAY(jchar, chars); FREE_C_HEAP_ARRAY(jchar, chars);
} }
GC_locker::unlock_critical(thread); GCLocker::unlock_critical(thread);
HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN(); HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
JNI_END JNI_END
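These four JNI entries are where user code reaches GCLocker::lock_critical and unlock_critical. A self-contained sketch of a native method driving that path; the Java_Example_sum native and its Java class are hypothetical:

#include <jni.h>

extern "C" JNIEXPORT jlong JNICALL
Java_Example_sum(JNIEnv* env, jclass, jintArray arr) {
  jsize len = env->GetArrayLength(arr);  // ordinary JNI call, before the critical region
  jlong sum = 0;
  jint* p = (jint*) env->GetPrimitiveArrayCritical(arr, NULL);  // GCLocker::lock_critical
  if (p != NULL) {
    for (jsize i = 0; i < len; i++) {
      sum += p[i];                       // no JNI calls and no blocking in here
    }
    env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);      // GCLocker::unlock_critical
  }
  return sum;
}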

View file

@@ -95,7 +95,7 @@ JvmtiEnvBase::initialize() {
{ {
// This block of code must not contain any safepoints, as list deallocation // This block of code must not contain any safepoints, as list deallocation
// (which occurs at a safepoint) cannot occur simultaneously with this list // (which occurs at a safepoint) cannot occur simultaneously with this list
// addition. Note: No_Safepoint_Verifier cannot, currently, be used before // addition. Note: NoSafepointVerifier cannot, currently, be used before
// threads exist. // threads exist.
JvmtiEnvIterator it; JvmtiEnvIterator it;
JvmtiEnvBase *previous_env = NULL; JvmtiEnvBase *previous_env = NULL;

View file

@@ -1904,7 +1904,7 @@ void JvmtiExport::record_vm_internal_object_allocation(oop obj) {
Thread* thread = Thread::current_or_null(); Thread* thread = Thread::current_or_null();
if (thread != NULL && thread->is_Java_thread()) { if (thread != NULL && thread->is_Java_thread()) {
// Can not take safepoint here. // Can not take safepoint here.
No_Safepoint_Verifier no_sfpt; NoSafepointVerifier no_sfpt;
// Can not take safepoint here so can not use state_for to get // Can not take safepoint here so can not use state_for to get
// jvmti thread state. // jvmti thread state.
JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state(); JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state();

View file

@@ -1674,10 +1674,10 @@ void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
// We cache a pointer to the bytecodes here in code_base. If GC // We cache a pointer to the bytecodes here in code_base. If GC
// moves the Method*, then the bytecodes will also move which // moves the Method*, then the bytecodes will also move which
// will likely cause a crash. We create a No_Safepoint_Verifier // will likely cause a crash. We create a NoSafepointVerifier
// object to detect whether we pass a possible safepoint in this // object to detect whether we pass a possible safepoint in this
// code block. // code block.
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
// Bytecodes and their length // Bytecodes and their length
address code_base = method->code_base(); address code_base = method->code_base();
@@ -1735,7 +1735,7 @@ void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
Relocator rc(method, NULL /* no RelocatorListener needed */); Relocator rc(method, NULL /* no RelocatorListener needed */);
methodHandle m; methodHandle m;
{ {
Pause_No_Safepoint_Verifier pnsv(&nsv); PauseNoSafepointVerifier pnsv(&nsv);
// ldc is 2 bytes and ldc_w is 3 bytes // ldc is 2 bytes and ldc_w is 3 bytes
m = rc.insert_space_at(bci, 3, inst_buffer, CHECK); m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);

View file

@@ -86,7 +86,7 @@ JvmtiThreadState::JvmtiThreadState(JavaThread* thread)
{ {
// The thread state list manipulation code must not have safepoints. // The thread state list manipulation code must not have safepoints.
// See periodic_clean_up(). // See periodic_clean_up().
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
_prev = NULL; _prev = NULL;
_next = _head; _next = _head;
@@ -123,7 +123,7 @@ JvmtiThreadState::~JvmtiThreadState() {
{ {
// The thread state list manipulation code must not have safepoints. // The thread state list manipulation code must not have safepoints.
// See periodic_clean_up(). // See periodic_clean_up().
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
if (_prev == NULL) { if (_prev == NULL) {
assert(_head == this, "sanity check"); assert(_head == this, "sanity check");
@@ -147,7 +147,7 @@ JvmtiThreadState::periodic_clean_up() {
// This iteration is initialized with "_head" instead of "JvmtiThreadState::first()" // This iteration is initialized with "_head" instead of "JvmtiThreadState::first()"
// because the latter requires the JvmtiThreadState_lock. // because the latter requires the JvmtiThreadState_lock.
// This iteration is safe at a safepoint as well, see the No_Safepoint_Verifier // This iteration is safe at a safepoint as well, see the NoSafepointVerifier
// asserts at all list manipulation sites. // asserts at all list manipulation sites.
for (JvmtiThreadState *state = _head; state != NULL; state = state->next()) { for (JvmtiThreadState *state = _head; state != NULL; state = state->next()) {
// For each environment thread state corresponding to an invalid environment // For each environment thread state corresponding to an invalid environment
@@ -182,7 +182,7 @@ void JvmtiThreadState::add_env(JvmtiEnvBase *env) {
// add this environment thread state to the end of the list (order is important) // add this environment thread state to the end of the list (order is important)
{ {
// list deallocation (which occurs at a safepoint) cannot occur simultaneously // list deallocation (which occurs at a safepoint) cannot occur simultaneously
debug_only(No_Safepoint_Verifier nosafepoint;) debug_only(NoSafepointVerifier nosafepoint;)
JvmtiEnvThreadStateIterator it(this); JvmtiEnvThreadStateIterator it(this);
JvmtiEnvThreadState* previous_ets = NULL; JvmtiEnvThreadState* previous_ets = NULL;

View file

@@ -981,7 +981,7 @@ void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
int marked = 0; int marked = 0;
CallSiteDepChange changes(call_site(), target()); CallSiteDepChange changes(call_site(), target());
{ {
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
oop context = java_lang_invoke_CallSite::context(call_site()); oop context = java_lang_invoke_CallSite::context(call_site());
@@ -1339,7 +1339,7 @@ JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject con
int marked = 0; int marked = 0;
{ {
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
assert(safe_to_expunge(), "removal is not safe"); assert(safe_to_expunge(), "removal is not safe");
DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context()); DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());

View file

@@ -1681,16 +1681,16 @@ void Arguments::set_cms_and_parnew_gc_flags() {
// OldPLAB sizing manually turned off: Use a larger default setting, // OldPLAB sizing manually turned off: Use a larger default setting,
// unless it was manually specified. This is because a too-low value // unless it was manually specified. This is because a too-low value
// will slow down scavenges. // will slow down scavenges.
FLAG_SET_ERGO(size_t, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166 FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
} else { } else {
FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
} }
} }
// If either of the static initialization defaults have changed, note this // If either of the static initialization defaults have changed, note this
// modification. // modification.
if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) { if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight); CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
} }
if (!ClassUnloading) { if (!ClassUnloading) {

View file

@@ -296,7 +296,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// Ensure that no safepoint is taken after pointers have been stored // Ensure that no safepoint is taken after pointers have been stored
// in fields of rematerialized objects. If a safepoint occurs from here on // in fields of rematerialized objects. If a safepoint occurs from here on
// out the java state residing in the vframeArray will be missed. // out the java state residing in the vframeArray will be missed.
No_Safepoint_Verifier no_safepoint; NoSafepointVerifier no_safepoint;
vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures); vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
#if defined(COMPILER2) || INCLUDE_JVMCI #if defined(COMPILER2) || INCLUDE_JVMCI

View file

@@ -883,7 +883,7 @@ public:
\ \
notproduct(bool, StrictSafepointChecks, trueInDebug, \ notproduct(bool, StrictSafepointChecks, trueInDebug, \
"Enable strict checks that safepoints cannot happen for threads " \ "Enable strict checks that safepoints cannot happen for threads " \
"that use No_Safepoint_Verifier") \ "that use NoSafepointVerifier") \
\ \
notproduct(bool, VerifyLastFrame, false, \ notproduct(bool, VerifyLastFrame, false, \
"Verify oops on last frame on entry to VM") \ "Verify oops on last frame on entry to VM") \

View file

@@ -451,7 +451,7 @@ class RuntimeHistogramElement : public HistogramElement {
#define IRT_LEAF(result_type, header) \ #define IRT_LEAF(result_type, header) \
result_type header { \ result_type header { \
VM_LEAF_BASE(result_type, header) \ VM_LEAF_BASE(result_type, header) \
debug_only(No_Safepoint_Verifier __nspv(true);) debug_only(NoSafepointVerifier __nspv(true);)
#define IRT_ENTRY_NO_ASYNC(result_type, header) \ #define IRT_ENTRY_NO_ASYNC(result_type, header) \
@@ -475,7 +475,7 @@ class RuntimeHistogramElement : public HistogramElement {
#define JRT_LEAF(result_type, header) \ #define JRT_LEAF(result_type, header) \
result_type header { \ result_type header { \
VM_LEAF_BASE(result_type, header) \ VM_LEAF_BASE(result_type, header) \
debug_only(JRT_Leaf_Verifier __jlv;) debug_only(JRTLeafVerifier __jlv;)
#define JRT_ENTRY_NO_ASYNC(result_type, header) \ #define JRT_ENTRY_NO_ASYNC(result_type, header) \
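These macros are how most runtime entries pick up the renamed verifiers; a sketch of a leaf entry written with them (ExampleRuntime::example_leaf is hypothetical):

JRT_LEAF(jint, ExampleRuntime::example_leaf(jint x))
  // In debug builds JRT_LEAF plants a JRTLeafVerifier here, so an unexpected
  // safepoint or heap allocation inside this leaf call trips an assert.
  return x + 1;
JRT_END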

View file

@@ -363,7 +363,7 @@ void SafepointSynchronize::begin() {
#endif // ASSERT #endif // ASSERT
// Update the count of active JNI critical regions // Update the count of active JNI critical regions
GC_locker::set_jni_lock_count(_current_jni_active_count); GCLocker::set_jni_lock_count(_current_jni_active_count);
if (log_is_enabled(Debug, safepoint)) { if (log_is_enabled(Debug, safepoint)) {
VM_Operation *op = VMThread::vm_operation(); VM_Operation *op = VMThread::vm_operation();
@@ -563,7 +563,7 @@ void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, Ja
if (!thread->do_critical_native_unlock()) { if (!thread->do_critical_native_unlock()) {
#ifdef ASSERT #ifdef ASSERT
if (!thread->in_critical()) { if (!thread->in_critical()) {
GC_locker::increment_debug_jni_lock_count(); GCLocker::increment_debug_jni_lock_count();
} }
#endif #endif
thread->enter_critical(); thread->enter_critical();

View file

@@ -2742,8 +2742,8 @@ JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* threa
return; return;
} }
// Lock and unlock a critical section to give the system a chance to block // Lock and unlock a critical section to give the system a chance to block
GC_locker::lock_critical(thread); GCLocker::lock_critical(thread);
GC_locker::unlock_critical(thread); GCLocker::unlock_critical(thread);
JRT_END JRT_END
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------

View file

@@ -159,7 +159,7 @@ bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(self->is_Java_thread(), "invariant"); assert(self->is_Java_thread(), "invariant");
assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant"); assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj if (obj == NULL) return false; // slow-path for invalid obj
const markOop mark = obj->mark(); const markOop mark = obj->mark();
@@ -209,7 +209,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(Self->is_Java_thread(), "invariant"); assert(Self->is_Java_thread(), "invariant");
assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
if (obj == NULL) return false; // Need to throw NPE if (obj == NULL) return false; // Need to throw NPE
const markOop mark = obj->mark(); const markOop mark = obj->mark();
@@ -1734,7 +1734,7 @@ class ReleaseJavaMonitorsClosure: public MonitorClosure {
void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
assert(THREAD == JavaThread::current(), "must be current Java thread"); assert(THREAD == JavaThread::current(), "must be current Java thread");
No_Safepoint_Verifier nsv; NoSafepointVerifier nsv;
ReleaseJavaMonitorsClosure rjmc(THREAD); ReleaseJavaMonitorsClosure rjmc(THREAD);
Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread"); Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
ObjectSynchronizer::monitors_iterate(&rjmc); ObjectSynchronizer::monitors_iterate(&rjmc);

View file

@@ -2440,7 +2440,7 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
// normal checks but also performs the transition back into // normal checks but also performs the transition back into
// thread_in_Java state. This is required so that critical natives // thread_in_Java state. This is required so that critical natives
// can potentially block and perform a GC if they are the last thread // can potentially block and perform a GC if they are the last thread
// exiting the GC_locker. // exiting the GCLocker.
void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) { void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
check_special_condition_for_native_trans(thread); check_special_condition_for_native_trans(thread);
@@ -2449,7 +2449,7 @@ void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThr
if (thread->do_critical_native_unlock()) { if (thread->do_critical_native_unlock()) {
ThreadInVMfromJavaNoAsyncException tiv(thread); ThreadInVMfromJavaNoAsyncException tiv(thread);
GC_locker::unlock_critical(thread); GCLocker::unlock_critical(thread);
thread->clear_critical_native_unlock(); thread->clear_critical_native_unlock();
} }
} }

View file

@@ -255,7 +255,7 @@ class Thread: public ThreadShadow {
// If !allow_allocation(), then an assertion failure will happen during allocation // If !allow_allocation(), then an assertion failure will happen during allocation
// (Hence, !allow_safepoint() => !allow_allocation()). // (Hence, !allow_safepoint() => !allow_allocation()).
// //
// The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters. // The two classes NoSafepointVerifier and No_Allocation_Verifier are used to set these counters.
// //
NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen
debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops. debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
@@ -263,10 +263,10 @@ class Thread: public ThreadShadow {
// Used by SkipGCALot class. // Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot? NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
friend class No_Alloc_Verifier; friend class NoAllocVerifier;
friend class No_Safepoint_Verifier; friend class NoSafepointVerifier;
friend class Pause_No_Safepoint_Verifier; friend class PauseNoSafepointVerifier;
friend class GC_locker; friend class GCLocker;
ThreadLocalAllocBuffer _tlab; // Thread-local eden ThreadLocalAllocBuffer _tlab; // Thread-local eden
jlong _allocated_bytes; // Cumulative number of bytes allocated on jlong _allocated_bytes; // Cumulative number of bytes allocated on

View file

@@ -503,7 +503,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
/* Generation and Space hierarchies */ \ /* Generation and Space hierarchies */ \
/**********************************************************************************/ \ /**********************************************************************************/ \
\ \
unchecked_nonstatic_field(ageTable, sizes, sizeof(ageTable::sizes)) \ unchecked_nonstatic_field(AgeTable, sizes, sizeof(AgeTable::sizes)) \
\ \
nonstatic_field(BarrierSet, _fake_rtti, BarrierSet::FakeRtti) \ nonstatic_field(BarrierSet, _fake_rtti, BarrierSet::FakeRtti) \
\ \
@@ -560,7 +560,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
\ \
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \ nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \ nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \ nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \ nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \ nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \ nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
@@ -1600,7 +1600,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
\ \
/* Miscellaneous other GC types */ \ /* Miscellaneous other GC types */ \
\ \
declare_toplevel_type(ageTable) \ declare_toplevel_type(AgeTable) \
declare_toplevel_type(Generation::StatRecord) \ declare_toplevel_type(Generation::StatRecord) \
declare_toplevel_type(GenerationSpec) \ declare_toplevel_type(GenerationSpec) \
declare_toplevel_type(HeapWord) \ declare_toplevel_type(HeapWord) \
@@ -2310,7 +2310,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
/* Generation and Space Hierarchy Constants */ \ /* Generation and Space Hierarchy Constants */ \
/********************************************/ \ /********************************************/ \
\ \
declare_constant(ageTable::table_size) \ declare_constant(AgeTable::table_size) \
\ \
declare_constant(BarrierSet::ModRef) \ declare_constant(BarrierSet::ModRef) \
declare_constant(BarrierSet::CardTableModRef) \ declare_constant(BarrierSet::CardTableModRef) \

View file

@@ -1708,10 +1708,10 @@ void VM_HeapDumper::doit() {
CollectedHeap* ch = Universe::heap(); CollectedHeap* ch = Universe::heap();
ch->ensure_parsability(false); // must happen, even if collection does ch->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GC_locker) // not happen (e.g. due to GCLocker)
if (_gc_before_heap_dump) { if (_gc_before_heap_dump) {
if (GC_locker::is_active()) { if (GCLocker::is_active()) {
warning("GC locker is held; pre-heapdump GC was skipped"); warning("GC locker is held; pre-heapdump GC was skipped");
} else { } else {
ch->collect_as_vm_thread(GCCause::_heap_dump); ch->collect_as_vm_thread(GCCause::_heap_dump);