8047212: runtime/ParallelClassLoading/bootstrap/random/inner-complex assert(ObjectSynchronizer::verify_objmon_isinpool(inf)) failed: monitor is invalid

Fix race between ObjectMonitor alloc and verification code; teach SA about "static pointer volatile" fields.

Reviewed-by: cvarming, dholmes, sspitsyn, coleenp
This commit is contained in:
Daniel D. Daugherty 2015-10-24 15:44:08 -07:00
parent ed82cd0ee8
commit cb0500e51d
3 changed files with 70 additions and 50 deletions

View file

@@ -116,7 +116,7 @@ static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
// global list of blocks of monitors // global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary. // want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL; ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list // global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads, // global monitor in-use list, for moribund threads,
@@ -890,21 +890,22 @@ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
return NULL; return NULL;
} }
// Visitors ... // Visitors ...
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; PaddedEnd<ObjectMonitor> * block =
ObjectMonitor* mid; (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block) { while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) { for (int i = _BLOCKSIZE - 1; i > 0; i--) {
mid = (ObjectMonitor *)(block + i); ObjectMonitor* mid = (ObjectMonitor *)(block + i);
oop object = (oop) mid->object(); oop object = (oop)mid->object();
if (object != NULL) { if (object != NULL) {
closure->do_monitor(mid); closure->do_monitor(mid);
} }
} }
block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
} }
} }
@@ -919,9 +920,9 @@ static inline ObjectMonitor* next(ObjectMonitor* block) {
void ObjectSynchronizer::oops_do(OopClosure* f) { void ObjectSynchronizer::oops_do(OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
for (PaddedEnd<ObjectMonitor> * block = PaddedEnd<ObjectMonitor> * block =
(PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL; (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
block = (PaddedEnd<ObjectMonitor> *)next(block)) { for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) { for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = (ObjectMonitor *)&block[i]; ObjectMonitor* mid = (ObjectMonitor *)&block[i];
@@ -1139,7 +1140,9 @@ ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
// The very first objectMonitor in a block is reserved and dedicated. // The very first objectMonitor in a block is reserved and dedicated.
// It serves as blocklist "next" linkage. // It serves as blocklist "next" linkage.
temp[0].FreeNext = gBlockList; temp[0].FreeNext = gBlockList;
gBlockList = temp; // There are lock-free uses of gBlockList so make sure that
// the previous stores happen before we update gBlockList.
OrderAccess::release_store_ptr(&gBlockList, temp);
// Add the new string of objectMonitors to the global free list // Add the new string of objectMonitors to the global free list
temp[_BLOCKSIZE - 1].FreeNext = gFreeList; temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
@@ -1621,31 +1624,33 @@ void ObjectSynchronizer::deflate_idle_monitors() {
nInuse += gOmInUseCount; nInuse += gOmInUseCount;
} }
} else for (PaddedEnd<ObjectMonitor> * block = } else {
(PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL; PaddedEnd<ObjectMonitor> * block =
block = (PaddedEnd<ObjectMonitor> *)next(block)) { (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
// Iterate over all extant monitors - Scavenge all idle monitors. for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header"); // Iterate over all extant monitors - Scavenge all idle monitors.
nInCirculation += _BLOCKSIZE; assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) { nInCirculation += _BLOCKSIZE;
ObjectMonitor* mid = (ObjectMonitor*)&block[i]; for (int i = 1; i < _BLOCKSIZE; i++) {
oop obj = (oop) mid->object(); ObjectMonitor* mid = (ObjectMonitor*)&block[i];
oop obj = (oop)mid->object();
if (obj == NULL) { if (obj == NULL) {
// The monitor is not associated with an object. // The monitor is not associated with an object.
// The monitor should either be a thread-specific private // The monitor should either be a thread-specific private
// free list or the global free list. // free list or the global free list.
// obj == NULL IMPLIES mid->is_busy() == 0 // obj == NULL IMPLIES mid->is_busy() == 0
guarantee(!mid->is_busy(), "invariant"); guarantee(!mid->is_busy(), "invariant");
continue; continue;
} }
deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp); deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
if (deflated) { if (deflated) {
mid->FreeNext = NULL; mid->FreeNext = NULL;
nScavenged++; nScavenged++;
} else { } else {
nInuse++; nInuse++;
}
} }
} }
} }
@@ -1789,18 +1794,18 @@ void ObjectSynchronizer::sanity_checks(const bool verbose,
// Verify all monitors in the monitor cache, the verification is weak. // Verify all monitors in the monitor cache, the verification is weak.
void ObjectSynchronizer::verify() { void ObjectSynchronizer::verify() {
PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; PaddedEnd<ObjectMonitor> * block =
ObjectMonitor* mid; (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block) { while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) { for (int i = 1; i < _BLOCKSIZE; i++) {
mid = (ObjectMonitor *)(block + i); ObjectMonitor* mid = (ObjectMonitor *)(block + i);
oop object = (oop) mid->object(); oop object = (oop)mid->object();
if (object != NULL) { if (object != NULL) {
mid->verify(); mid->verify();
} }
} }
block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
} }
} }
@@ -1809,19 +1814,19 @@ void ObjectSynchronizer::verify() {
// the list of extant blocks without taking a lock. // the list of extant blocks without taking a lock.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; PaddedEnd<ObjectMonitor> * block =
(PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block) { while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header"); assert(block->object() == CHAINMARKER, "must be a block header");
if (monitor > (ObjectMonitor *)&block[0] && if (monitor > (ObjectMonitor *)&block[0] &&
monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) { monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
address mon = (address) monitor; address mon = (address)monitor;
address blk = (address) block; address blk = (address)block;
size_t diff = mon - blk; size_t diff = mon - blk;
assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check"); assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
return 1; return 1;
} }
block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
} }
return 0; return 0;
} }

View file

@@ -140,7 +140,7 @@ class ObjectSynchronizer : AllStatic {
// global list of blocks of monitors // global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary. // want to expose the PaddedEnd template more than necessary.
static ObjectMonitor * gBlockList; static ObjectMonitor * volatile gBlockList;
// global monitor free list // global monitor free list
static ObjectMonitor * volatile gFreeList; static ObjectMonitor * volatile gFreeList;
// global monitor in-use list, for moribund threads, // global monitor in-use list, for moribund threads,

View file

@@ -284,6 +284,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
#define VM_STRUCTS(nonstatic_field, \ #define VM_STRUCTS(nonstatic_field, \
static_field, \ static_field, \
static_ptr_volatile_field, \
unchecked_nonstatic_field, \ unchecked_nonstatic_field, \
volatile_nonstatic_field, \ volatile_nonstatic_field, \
nonproduct_nonstatic_field, \ nonproduct_nonstatic_field, \
@@ -1183,7 +1184,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \ volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
nonstatic_field(BasicObjectLock, _lock, BasicLock) \ nonstatic_field(BasicObjectLock, _lock, BasicLock) \
nonstatic_field(BasicObjectLock, _obj, oop) \ nonstatic_field(BasicObjectLock, _obj, oop) \
static_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \ static_ptr_volatile_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \
\ \
/*********************/ \ /*********************/ \
/* Matcher (C2 only) */ \ /* Matcher (C2 only) */ \
@@ -2902,6 +2903,11 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
#define GENERATE_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \ #define GENERATE_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{ QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, &typeName::fieldName }, { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, &typeName::fieldName },
// This macro generates a VMStructEntry line for a static pointer volatile field,
// e.g.: "static ObjectMonitor * volatile gBlockList;"
#define GENERATE_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{ QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, (void *)&typeName::fieldName },
// This macro generates a VMStructEntry line for an unchecked // This macro generates a VMStructEntry line for an unchecked
// nonstatic field, in which the size of the type is also specified. // nonstatic field, in which the size of the type is also specified.
// The type string is given as NULL, indicating an "opaque" type. // The type string is given as NULL, indicating an "opaque" type.
@@ -2927,10 +2933,15 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \ #define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; } {typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
// This macro checks the type of a VMStructEntry by comparing pointer types // This macro checks the type of a static VMStructEntry by comparing pointer types
#define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \ #define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{type* dummy = &typeName::fieldName; } {type* dummy = &typeName::fieldName; }
// This macro checks the type of a static pointer volatile VMStructEntry by comparing pointer types,
// e.g.: "static ObjectMonitor * volatile gBlockList;"
#define CHECK_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{type volatile * dummy = &typeName::fieldName; }
// This macro ensures the type of a field and its containing type are // This macro ensures the type of a field and its containing type are
// present in the type table. The assertion string is shorter than // present in the type table. The assertion string is shorter than
// preferable because (incredibly) of a bug in Solstice NFS client // preferable because (incredibly) of a bug in Solstice NFS client
@@ -3141,6 +3152,7 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
VM_STRUCTS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, VM_STRUCTS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
GENERATE_STATIC_VM_STRUCT_ENTRY, GENERATE_STATIC_VM_STRUCT_ENTRY,
GENERATE_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY,
GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY,
GENERATE_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3370,6 +3382,7 @@ void
VMStructs::init() { VMStructs::init() {
VM_STRUCTS(CHECK_NONSTATIC_VM_STRUCT_ENTRY, VM_STRUCTS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY, CHECK_STATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_PTR_VOLATILE_VM_STRUCT_ENTRY,
CHECK_NO_OP, CHECK_NO_OP,
CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY, CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY, CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3491,8 +3504,10 @@ VMStructs::init() {
CHECK_NO_OP, CHECK_NO_OP,
CHECK_NO_OP, CHECK_NO_OP,
CHECK_NO_OP, CHECK_NO_OP,
CHECK_NO_OP,
CHECK_NO_OP)); CHECK_NO_OP));
debug_only(VM_STRUCTS(CHECK_NO_OP, debug_only(VM_STRUCTS(CHECK_NO_OP,
ENSURE_FIELD_TYPE_PRESENT,
ENSURE_FIELD_TYPE_PRESENT, ENSURE_FIELD_TYPE_PRESENT,
CHECK_NO_OP, CHECK_NO_OP,
ENSURE_FIELD_TYPE_PRESENT, ENSURE_FIELD_TYPE_PRESENT,