8078023: verify_no_cset_oops found reclaimed humongous object in SATB buffer

Removed the no-longer-valid checking of SATB buffers.

Reviewed-by: jmasa, pliden
This commit is contained in:
Kim Barrett 2015-04-22 14:06:49 -04:00
parent ace7f7933e
commit 5f1b96662a
5 changed files with 45 additions and 130 deletions

View file

@@ -2793,9 +2793,7 @@ ConcurrentMark::claim_region(uint worker_id) {
#ifndef PRODUCT #ifndef PRODUCT
enum VerifyNoCSetOopsPhase { enum VerifyNoCSetOopsPhase {
VerifyNoCSetOopsStack, VerifyNoCSetOopsStack,
VerifyNoCSetOopsQueues, VerifyNoCSetOopsQueues
VerifyNoCSetOopsSATBCompleted,
VerifyNoCSetOopsSATBThread
}; };
class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
@@ -2808,8 +2806,6 @@ private:
switch (_phase) { switch (_phase) {
case VerifyNoCSetOopsStack: return "Stack"; case VerifyNoCSetOopsStack: return "Stack";
case VerifyNoCSetOopsQueues: return "Queue"; case VerifyNoCSetOopsQueues: return "Queue";
case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
return NULL; return NULL;
@@ -2836,7 +2832,7 @@ public:
virtual void do_oop(narrowOop* p) { virtual void do_oop(narrowOop* p) {
// We should not come across narrow oops while scanning marking // We should not come across narrow oops while scanning marking
// stacks and SATB buffers. // stacks
ShouldNotReachHere(); ShouldNotReachHere();
} }
@@ -2845,10 +2841,7 @@ public:
} }
}; };
void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, void ConcurrentMark::verify_no_cset_oops() {
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) { if (!G1CollectedHeap::heap()->mark_in_progress()) {
return; return;
@@ -2856,65 +2849,47 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
VerifyNoCSetOopsClosure cl; VerifyNoCSetOopsClosure cl;
if (verify_stacks) { // Verify entries on the global mark stack
// Verify entries on the global mark stack cl.set_phase(VerifyNoCSetOopsStack);
cl.set_phase(VerifyNoCSetOopsStack); _markStack.oops_do(&cl);
_markStack.oops_do(&cl);
// Verify entries on the task queues // Verify entries on the task queues
for (uint i = 0; i < _max_worker_id; i += 1) { for (uint i = 0; i < _max_worker_id; i += 1) {
cl.set_phase(VerifyNoCSetOopsQueues, i); cl.set_phase(VerifyNoCSetOopsQueues, i);
CMTaskQueue* queue = _task_queues->queue(i); CMTaskQueue* queue = _task_queues->queue(i);
queue->oops_do(&cl); queue->oops_do(&cl);
}
} }
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); // Verify the global finger
HeapWord* global_finger = finger();
// Verify entries on the enqueued SATB buffers if (global_finger != NULL && global_finger < _heap_end) {
if (verify_enqueued_buffers) { // The global finger always points to a heap region boundary. We
cl.set_phase(VerifyNoCSetOopsSATBCompleted); // use heap_region_containing_raw() to get the containing region
satb_qs.iterate_completed_buffers_read_only(&cl); // given that the global finger could be pointing to a free region
// which subsequently becomes continues humongous. If that
// happens, heap_region_containing() will return the bottom of the
// corresponding starts humongous region and the check below will
// not hold any more.
// Since we always iterate over all regions, we might get a NULL HeapRegion
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
} }
// Verify entries on the per-thread SATB buffers // Verify the task fingers
if (verify_thread_buffers) { assert(parallel_marking_threads() <= _max_worker_id, "sanity");
cl.set_phase(VerifyNoCSetOopsSATBThread); for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
satb_qs.iterate_thread_buffers_read_only(&cl); CMTask* task = _tasks[i];
} HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap_end) {
if (verify_fingers) { // See above note on the global finger verification.
// Verify the global finger HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
HeapWord* global_finger = finger(); guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
if (global_finger != NULL && global_finger < _heap_end) { !task_hr->in_collection_set(),
// The global finger always points to a heap region boundary. We err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
// use heap_region_containing_raw() to get the containing region p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
// given that the global finger could be pointing to a free region
// which subsequently becomes continues humongous. If that
// happens, heap_region_containing() will return the bottom of the
// corresponding starts humongous region and the check below will
// not hold any more.
// Since we always iterate over all regions, we might get a NULL HeapRegion
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
}
// Verify the task fingers
assert(parallel_marking_threads() <= _max_worker_id, "sanity");
for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
CMTask* task = _tasks[i];
HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap_end) {
// See above note on the global finger verification.
HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
}
} }
} }
} }

View file

@@ -785,14 +785,9 @@ public:
} }
// Verify that there are no CSet oops on the stacks (taskqueues / // Verify that there are no CSet oops on the stacks (taskqueues /
// global mark stack), enqueued SATB buffers, per-thread SATB // global mark stack) and fingers (global / per-task).
// buffers, and fingers (global / per-task). The boolean parameters // If marking is not in progress, it's a no-op.
// decide which of the above data structures to verify. If marking void verify_no_cset_oops() PRODUCT_RETURN;
// is not in progress, it's a no-op.
void verify_no_cset_oops(bool verify_stacks,
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
bool isPrevMarked(oop p) const { bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop"); assert(p != NULL && p->is_oop(), "expected an oop");

View file

@@ -3806,14 +3806,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
_cm->note_start_of_gc(); _cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that // We call this after finalize_cset() to
// we have not filtered them yet (we'll do so during the
// GC). We also call this after finalize_cset() to
// ensure that the CSet has been finalized. // ensure that the CSet has been finalized.
_cm->verify_no_cset_oops(true /* verify_stacks */, _cm->verify_no_cset_oops();
true /* verify_enqueued_buffers */,
false /* verify_thread_buffers */,
true /* verify_fingers */);
if (_hr_printer.is_active()) { if (_hr_printer.is_active()) {
HeapRegion* hr = g1_policy()->collection_set(); HeapRegion* hr = g1_policy()->collection_set();
@@ -3836,16 +3831,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Actually do the work... // Actually do the work...
evacuate_collection_set(evacuation_info); evacuate_collection_set(evacuation_info);
// We do this to mainly verify the per-thread SATB buffers
// (which have been filtered by now) since we didn't verify
// them earlier. No point in re-checking the stacks / enqueued
// buffers given that the CSet has not changed since last time
// we checked.
_cm->verify_no_cset_oops(false /* verify_stacks */,
false /* verify_enqueued_buffers */,
true /* verify_thread_buffers */,
true /* verify_fingers */);
free_collection_set(g1_policy()->collection_set(), evacuation_info); free_collection_set(g1_policy()->collection_set(), evacuation_info);
eagerly_reclaim_humongous_regions(); eagerly_reclaim_humongous_regions();
@@ -3928,10 +3913,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We redo the verification but now wrt to the new CSet which // We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed. // has just got initialized after the previous CSet was freed.
_cm->verify_no_cset_oops(true /* verify_stacks */, _cm->verify_no_cset_oops();
true /* verify_enqueued_buffers */,
true /* verify_thread_buffers */,
true /* verify_fingers */);
_cm->note_end_of_gc(); _cm->note_end_of_gc();
// This timing is only used by the ergonomics to handle our pause target. // This timing is only used by the ergonomics to handle our pause target.

View file

@@ -180,12 +180,6 @@ bool ObjPtrQueue::should_enqueue_buffer() {
return should_enqueue; return should_enqueue;
} }
void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
}
}
void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) { void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
if (_buf != NULL) { if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz); apply_closure_to_buffer(cl, _buf, _index, _sz);
@@ -317,28 +311,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) {
} }
} }
void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
nd = nd->next();
}
}
void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure(cl);
}
shared_satb_queue()->apply_closure(cl);
}
#ifndef PRODUCT #ifndef PRODUCT
// Helpful for debugging // Helpful for debugging

View file

@@ -41,9 +41,6 @@ private:
// Filter out unwanted entries from the buffer. // Filter out unwanted entries from the buffer.
void filter(); void filter();
// Apply the closure to all elements.
void apply_closure(ObjectClosure* cl);
// Apply the closure to all elements and empty the buffer; // Apply the closure to all elements and empty the buffer;
void apply_closure_and_empty(ObjectClosure* cl); void apply_closure_and_empty(ObjectClosure* cl);
@@ -105,12 +102,6 @@ public:
// completed buffers exist, return false. // completed buffers exist, return false.
bool apply_closure_to_completed_buffer(ObjectClosure* closure); bool apply_closure_to_completed_buffer(ObjectClosure* closure);
// Apply the given closure on enqueued and currently-active buffers
// respectively. Both methods are read-only, i.e., they do not
// modify any of the buffers.
void iterate_completed_buffers_read_only(ObjectClosure* cl);
void iterate_thread_buffers_read_only(ObjectClosure* cl);
#ifndef PRODUCT #ifndef PRODUCT
// Helpful for debugging // Helpful for debugging
void print_all(const char* msg); void print_all(const char* msg);