8154750: Add missing OrderAccess operations to ClassLoaderData lock-free data structures

Reviewed-by: kbarrett, coleenp, acorn
David Holmes committed on 2016-06-02 23:37:09 -04:00
parent e8db159098
commit c301fb7188
3 changed files with 96 additions and 76 deletions
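
Every change in this patch is one idiom applied repeatedly: a field that is lazily created under a lock but read lock-free must be published with a release store and read with an acquire load. Below is a minimal standalone sketch of that double-checked pattern; it is not taken from the patch itself. std::atomic stands in for HotSpot's volatile fields plus OrderAccess, and Table, table() and g_table_mutex are hypothetical names.

#include <atomic>
#include <mutex>

struct Table { int dummy; };                  // hypothetical lazily-created structure

static std::atomic<Table*> g_table{nullptr};  // the field that is read lock-free
static std::mutex g_table_mutex;              // plays the role of metaspace_lock()

Table* table() {
  // Lock-free fast path: the acquire pairs with the release below, so a
  // non-null pointer guarantees the Table's initialization is also visible.
  Table* t = g_table.load(std::memory_order_acquire);
  if (t == nullptr) {
    std::lock_guard<std::mutex> guard(g_table_mutex);
    // Re-check: another thread may have published while we waited for the lock.
    t = g_table.load(std::memory_order_relaxed);
    if (t == nullptr) {
      t = new Table();
      // Publish: all stores made by the constructor are ordered before this.
      g_table.store(t, std::memory_order_release);
    }
  }
  return t;
}

Most of the hunks below add the acquire side of this pairing; several also replace a bare storestore barrier plus a plain assignment with release_store_ptr.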

src/share/vm/classfile/classLoaderData.cpp

@@ -67,6 +67,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
@@ -76,6 +77,11 @@
 #include "trace/tracing.hpp"
 #endif
 
+// helper function to avoid in-line casts
+template <typename T> static T* load_ptr_acquire(T* volatile *p) {
+  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
+}
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -147,20 +153,23 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
+  assert_locked_or_safepoint(_metaspace_lock);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     f(k);
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -179,7 +188,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -188,6 +198,7 @@ void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 }
 
 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);
@@ -200,9 +211,11 @@ void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 }
 
 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  if (_packages != NULL) {
-    for (int i = 0; i < _packages->table_size(); i++) {
-      for (PackageEntry* entry = _packages->bucket(i);
+  // Lock-free access requires load_ptr_acquire
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages != NULL) {
+    for (int i = 0; i < packages->table_size(); i++) {
+      for (PackageEntry* entry = packages->bucket(i);
                          entry != NULL;
                          entry = entry->next()) {
         f(entry);
@@ -325,10 +338,9 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
-    // Make sure linked class is stable, since the class list is walked without a lock
-    OrderAccess::storestore();
-    // link the new item into the list
-    _klasses = k;
+    // Link the new item into the list, making sure the linked class is stable
+    // since the list can be walked without a lock
+    OrderAccess::release_store_ptr(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -343,11 +355,10 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
   }
 }
 
-// This is called by InstanceKlass::deallocate_contents() to remove the
-// scratch_class for redefine classes. We need a lock because there it may not
-// be called at a safepoint if there's an error.
+// Remove a klass from the _klasses list for scratch_class during redefinition
+// or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k == scratch_class) {
@@ -390,42 +401,46 @@ void ClassLoaderData::unload() {
 
 PackageEntryTable* ClassLoaderData::packages() {
   // Lazily create the package entry table at first request.
-  if (_packages == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages == NULL) {
     MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     // Check if _packages got allocated while we were waiting for this lock.
-    if (_packages == NULL) {
-      _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+    if ((packages = _packages) == NULL) {
+      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+      // Ensure _packages is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_packages, packages);
     }
   }
-  return _packages;
+  return packages;
 }
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  if (_modules == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  if (modules == NULL) {
     MutexLocker m1(Module_lock);
-    // Check again if _modules has been allocated while we were getting this lock.
-    if (_modules != NULL) {
-      return _modules;
-    }
-
-    ModuleEntryTable* temp_table = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
-    // Each loader has one unnamed module entry. Create it before
-    // any classes, loaded by this loader, are defined in case
-    // they end up being defined in loader's unnamed module.
-    temp_table->create_unnamed_module(this);
-
-    {
-      MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-      // Ensure _modules is stable, since it is examined without a lock
-      OrderAccess::storestore();
-      _modules = temp_table;
+    // Check if _modules got allocated while we were waiting for this lock.
+    if ((modules = _modules) == NULL) {
+      modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
+
+      // Each loader has one unnamed module entry. Create it before
+      // any classes, loaded by this loader, are defined in case
+      // they end up being defined in loader's unnamed module.
+      modules->create_unnamed_module(this);
+
+      {
+        MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+        // Ensure _modules is stable, since it is examined without a lock
+        OrderAccess::release_store_ptr(&_modules, modules);
+      }
     }
   }
-  return _modules;
+  return modules;
 }
 
 oop ClassLoaderData::keep_alive_object() const {
+  assert_locked_or_safepoint(_metaspace_lock);
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
 }
@@ -499,30 +514,33 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  if (_metaspace == NULL) {
-    MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-    // Check again if metaspace has been allocated while we were getting this lock.
-    if (_metaspace != NULL) {
-      return _metaspace;
-    }
-    if (this == the_null_class_loader_data()) {
-      assert (class_loader() == NULL, "Must be");
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
-    } else if (is_anonymous()) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
-    } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
-    } else {
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType));
+  // Lock-free access requires load_ptr_acquire.
+  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  if (metaspace == NULL) {
+    MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
+    // Check if _metaspace got allocated while we were waiting for this lock.
+    if ((metaspace = _metaspace) == NULL) {
+      if (this == the_null_class_loader_data()) {
+        assert (class_loader() == NULL, "Must be");
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+      } else if (is_anonymous()) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+      } else {
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+      }
+      // Ensure _metaspace is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_metaspace, metaspace);
     }
   }
-  return _metaspace;
+  return metaspace;
 }
 
 JNIHandleBlock* ClassLoaderData::handles() const { return _handles; }
@@ -638,6 +656,7 @@ void ClassLoaderData::dump(outputStream * const out) {
 #endif // PRODUCT
 
 void ClassLoaderData::verify() {
+  assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();
 
   guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
@@ -656,7 +675,8 @@ void ClassLoaderData::verify() {
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -1046,6 +1066,7 @@ ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic
 
   // Find the first klass in the CLDG.
   while (cld != NULL) {
+    assert_locked_or_safepoint(cld->metaspace_lock());
     klass = cld->_klasses;
     if (klass != NULL) {
       _next_klass = klass;
@@ -1063,6 +1084,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass)
 
   // No more klasses in the current CLD. Time to find a new CLD.
   ClassLoaderData* cld = klass->class_loader_data();
+  assert_locked_or_safepoint(cld->metaspace_lock());
   while (next == NULL) {
     cld = cld->next();
     if (cld == NULL) {
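
The _klasses hunks above combine the publication idiom with a lock-free list walk: writers prepend under the lock and release-store the new head, readers acquire-load the head once and then follow next links. Below is a self-contained sketch of that list discipline in portable C++; Node, g_head, and g_list_mutex are hypothetical names, and std::atomic stands in for HotSpot's OrderAccess.

#include <atomic>
#include <mutex>

struct Node {
  int   payload;
  Node* next;   // written before the node is published, then stable
};

static std::atomic<Node*> g_head{nullptr};
static std::mutex g_list_mutex;

void prepend(int payload) {
  Node* n = new Node{payload, nullptr};
  std::lock_guard<std::mutex> guard(g_list_mutex);
  n->next = g_head.load(std::memory_order_relaxed);
  // Release: n->payload and n->next are visible before n becomes the head.
  g_head.store(n, std::memory_order_release);
}

void walk(void (*f)(Node*)) {
  // Acquire once at the head; the rest of the list is immutable once linked.
  for (Node* n = g_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
    f(n);
  }
}

What makes the unlocked walk safe without hazard pointers or reference counting is that removal is confined to safepoints, which remove_class now asserts.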

src/share/vm/classfile/classLoaderData.hpp

@@ -171,7 +171,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   Dependencies _dependencies;      // holds dependencies from this class loader
                                    // data to others.
-  Metaspace * _metaspace;          // Meta-space where meta-data defined by the
+  Metaspace * volatile _metaspace; // Meta-space where meta-data defined by the
                                    // classes in the class loader are allocated.
   Mutex* _metaspace_lock;          // Locks the metaspace for allocations and setup.
   bool _unloading;                 // true if this class loader goes away
@@ -186,9 +186,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   JNIHandleBlock* _handles;        // Handles to constant pool arrays, Modules, etc, which
                                    // have the same life cycle of the corresponding ClassLoader.
-  Klass* _klasses;                 // The classes defined by the class loader.
-  PackageEntryTable* _packages;    // The packages defined by the class loader.
-  ModuleEntryTable* _modules;      // The modules defined by the class loader.
+  Klass* volatile _klasses;               // The classes defined by the class loader.
+  PackageEntryTable* volatile _packages;  // The packages defined by the class loader.
+  ModuleEntryTable* volatile _modules;    // The modules defined by the class loader.
 
   // These method IDs are created for the class loader and set to NULL when the
   // class loader is unloaded. They are rarely freed, only for redefine classes
@@ -216,8 +216,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
-  void set_metaspace(Metaspace* m) { _metaspace = m; }
-
   JNIHandleBlock* handles() const;
   void set_handles(JNIHandleBlock* handles);
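
A side note on the field declarations above: the volatile qualifier sits to the right of the *, so it is the pointer field itself that is volatile, not the object it designates, which is what load_ptr_acquire and release_store_ptr operate on. Illustrative declarations only (the Metaspace stand-in is hypothetical):

struct Metaspace { };                  // stand-in for the real class

Metaspace* volatile _metaspace;        // volatile pointer to (non-volatile) Metaspace
volatile Metaspace* _metaspace_wrong;  // pointer to volatile Metaspace -- a different thing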

src/share/vm/oops/instanceKlass.cpp

@@ -1104,21 +1104,21 @@ void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_k, TRAP
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
                              InterpreterOopMap* entry_for) {
-  // Dirty read, then double-check under a lock.
-  if (_oop_map_cache == NULL) {
-    // Otherwise, allocate a new one.
+  // Lazily create the _oop_map_cache at first request
+  // Lock-free access requires load_ptr_acquire.
+  OopMapCache* oop_map_cache =
+    static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
-    // First time use. Allocate a cache in C heap
-    if (_oop_map_cache == NULL) {
-      // Release stores from OopMapCache constructor before assignment
-      // to _oop_map_cache. C++ compilers on ppc do not emit the
-      // required memory barrier only because of the volatile
-      // qualifier of _oop_map_cache.
-      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
+    // Check if _oop_map_cache was allocated while we were waiting for this lock
+    if ((oop_map_cache = _oop_map_cache) == NULL) {
+      oop_map_cache = new OopMapCache();
+      // Ensure _oop_map_cache is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
     }
   }
-  // _oop_map_cache is constant after init; lookup below does is own locking.
-  _oop_map_cache->lookup(method, bci, entry_for);
+  // _oop_map_cache is constant after init; lookup below does its own locking.
+  oop_map_cache->lookup(method, bci, entry_for);
 }
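
This last hunk shows why the reader side matters: the old code already published with release_store_ptr, but the unlocked read of _oop_map_cache was a plain load, and on weakly ordered processors such as PPC or ARM a plain load can observe the published pointer while the cache's contents are still stale. A compressed sketch of the required pairing, with hypothetical names and std::atomic in place of OrderAccess:

#include <atomic>

struct Cache { int table[16]; };

static std::atomic<Cache*> g_cache{nullptr};

// Writer side (called under the allocation lock): the release store orders the
// constructor's writes before the pointer becomes visible to other threads.
void publish() {
  Cache* c = new Cache();
  g_cache.store(c, std::memory_order_release);
}

// Reader side (lock-free): the acquire load pairs with the release store above.
// Without it, the reader could get the pointer but see uninitialized fields.
int lookup(int i) {
  Cache* c = g_cache.load(std::memory_order_acquire);
  return (c != nullptr) ? c->table[i] : -1;
}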