8066133: Fix missing review changes for JDK-8065972

Reviewed-by: mgerdin, stefank
Bengt Rutisson 2014-11-28 08:20:52 +01:00
parent 0ecc753586
commit d8635f58cd
11 changed files with 4 additions and 440 deletions


@@ -1201,14 +1201,6 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
return obj;
}
void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz) {
// CMS does not support promotion undo.
ShouldNotReachHere();
}
void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {


@@ -1151,9 +1151,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Overrides for parallel promotion.
virtual oop par_promote(int thread_num,
oop obj, markOop m, size_t word_sz);
// This one should not be called for CMS.
virtual void par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz);
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);


@@ -884,8 +884,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
// A Generation that does parallel young-gen collection.
bool ParNewGeneration::_avoid_promotion_undo = false;
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -934,10 +932,6 @@ void ParNewGeneration::collect(bool full,
assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen.");
_next_gen = gch->next_gen(this);
// Do we have to avoid promotion_undo?
if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
set_avoid_promotion_undo(true);
}
// If the next generation is too full to accommodate worst-case promotion
// from this generation, pass on collection; let the next generation
@@ -1141,7 +1135,7 @@ oop ParNewGeneration::real_forwardee_slow(oop obj) {
#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
return
- (_avoid_promotion_undo && p == ClaimedForwardPtr)
+ (p == ClaimedForwardPtr)
|| Universe::heap()->is_in_reserved(p);
}
#endif
@@ -1162,7 +1156,7 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.
- oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
+ oop ParNewGeneration::copy_to_survivor_space(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
@@ -1282,131 +1276,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
return forward_ptr;
}
// Multiple GC threads may try to promote the same object. If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer. The other threads have to undo their copy.
oop ParNewGeneration::copy_to_survivor_space_with_undo(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
assert(is_in_reserved(old), "shouldn't be scavenging this oop");
// The sequential code read "old->age()" below. That doesn't work here,
// since the age is in the mark word, and that might be overwritten with
// a forwarding pointer by a parallel thread. So we must save the mark
// word here, install it in a local oopDesc, and then analyze it.
oopDesc dummyOld;
dummyOld.set_mark(m);
assert(!dummyOld.is_forwarded(),
"should not be called with forwarding pointer mark word.");
bool failed_to_promote = false;
oop new_obj = NULL;
oop forward_ptr;
// Try allocating obj in to-space (unless too old)
if (dummyOld.age() < tenuring_threshold()) {
new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
if (new_obj == NULL) {
set_survivor_overflow(true);
}
}
if (new_obj == NULL) {
// Either to-space is full or we decided to promote
// try allocating obj tenured
new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
old, m, sz);
if (new_obj == NULL) {
// promotion failed, forward to self
forward_ptr = old->forward_to_atomic(old);
new_obj = old;
if (forward_ptr != NULL) {
return forward_ptr; // someone else succeeded
}
_promotion_failed = true;
failed_to_promote = true;
preserve_mark_if_necessary(old, m);
par_scan_state->register_promotion_failure(sz);
}
} else {
// Is in to-space; do copying ourselves.
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
// Restore the mark word copied above.
new_obj->set_mark(m);
// Increment age if new_obj still in new generation
new_obj->incr_age();
par_scan_state->age_table()->add(new_obj, sz);
}
assert(new_obj != NULL, "just checking");
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
}
#endif
// Now attempt to install the forwarding pointer (atomically).
// We have to copy the mark word before overwriting with forwarding
// ptr, so we can restore it below in the copy.
if (!failed_to_promote) {
forward_ptr = old->forward_to_atomic(new_obj);
}
if (forward_ptr == NULL) {
oop obj_to_push = new_obj;
if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
// Length field used as index of next element to be scanned.
// Real length can be obtained from real_forwardee()
arrayOop(old)->set_length(0);
obj_to_push = old;
assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
"push forwarded object");
}
// Push it on one of the queues of to-be-scanned objects.
bool simulate_overflow = false;
NOT_PRODUCT(
if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
// Add stats for overflow pushes.
push_on_overflow_list(old, par_scan_state);
TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
}
return new_obj;
}
// Oops. Someone beat us to it. Undo the allocation. Where did we
// allocate it?
if (is_in_reserved(new_obj)) {
// Must be in to_space.
assert(to()->is_in_reserved(new_obj), "Checking");
par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
} else {
assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
_next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
(HeapWord*)new_obj, sz);
}
return forward_ptr;
}
#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely


@@ -329,9 +329,6 @@ class ParNewGeneration: public DefNewGeneration {
oop _overflow_list;
NOT_PRODUCT(ssize_t _num_par_pushes;)
// If true, older generation does not support promotion undo, so avoid.
static bool _avoid_promotion_undo;
// This closure is used by the reference processor to filter out
// references to live referent.
DefNewGeneration::IsAliveClosure _is_alive_closure;
@@ -349,9 +346,6 @@ class ParNewGeneration: public DefNewGeneration {
bool _survivor_overflow;
bool avoid_promotion_undo() { return _avoid_promotion_undo; }
void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
bool survivor_overflow() { return _survivor_overflow; }
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
@@ -385,20 +379,7 @@ class ParNewGeneration: public DefNewGeneration {
// "obj" is the object to be copied, "m" is a recent value of its mark
// that must not contain a forwarding pointer (though one might be
// inserted in "obj"s mark word by a parallel thread).
- inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
+ oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m) {
if (_avoid_promotion_undo) {
return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
obj, obj_sz, m);
}
return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
}
oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);
oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);
// in support of testing overflow code


@@ -142,216 +142,3 @@ void ParGCAllocBuffer::print() {
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT
const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
(size_t)Generation::GenGrain);
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
BlockOffsetSharedArray* bsa) :
ParGCAllocBuffer(word_sz),
_bsa(bsa),
_bt(bsa, MemRegion(_bottom, _hard_end)),
_true_end(_hard_end)
{}
// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
bool contig) {
CollectedHeap::fill_with_object(mr);
if (contig) {
_bt.alloc_block(mr.start(), mr.end());
} else {
_bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
}
}
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
HeapWord* res = NULL;
if (_true_end > _hard_end) {
assert((HeapWord*)align_size_down(intptr_t(_hard_end),
ChunkSizeInBytes) == _hard_end,
"or else _true_end should be equal to _hard_end");
assert(_retained, "or else _true_end should be equal to _hard_end");
assert(_retained_filler.end() <= _top, "INVARIANT");
CollectedHeap::fill_with_object(_retained_filler);
if (_top < _hard_end) {
fill_region_with_block(MemRegion(_top, _hard_end), true);
}
HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
_retained_filler = MemRegion(_hard_end, FillerHeaderSize);
_bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
_top = _retained_filler.end();
_hard_end = next_hard_end;
_end = _hard_end - AlignmentReserve;
res = ParGCAllocBuffer::allocate(word_sz);
if (res != NULL) {
_bt.alloc_block(res, word_sz);
}
}
return res;
}
void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
ParGCAllocBuffer::undo_allocation(obj, word_sz);
// This may back us up beyond the previous threshold, so reset.
_bt.set_region(MemRegion(_top, _hard_end));
_bt.initialize_threshold();
}
void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
assert(!retain || end_of_gc, "Can only retain at GC end.");
if (_retained) {
// We're about to make the retained_filler into a block.
_bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
_retained_filler.end());
}
// Reset _hard_end to _true_end (and update _end)
if (retain && _hard_end != NULL) {
assert(_hard_end <= _true_end, "Invariant.");
_hard_end = _true_end;
_end = MAX2(_top, _hard_end - AlignmentReserve);
assert(_end <= _hard_end, "Invariant.");
}
_true_end = _hard_end;
HeapWord* pre_top = _top;
ParGCAllocBuffer::retire(end_of_gc, retain);
// Now any old _retained_filler is cut back to size, the free part is
// filled with a filler object, and top is past the header of that
// object.
if (retain && _top < _end) {
assert(end_of_gc && retain, "Or else retain should be false.");
// If the lab does not start on a card boundary, we don't want to
// allocate onto that card, since that might lead to concurrent
// allocation and card scanning, which we don't support. So we fill
// the first card with a garbage object.
size_t first_card_index = _bsa->index_for(pre_top);
HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
if (first_card_start < pre_top) {
HeapWord* second_card_start =
_bsa->inc_by_region_size(first_card_start);
// Ensure enough room to fill with the smallest block
second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
// If the end is already in the first card, don't go beyond it!
// Or if the remainder is too small for a filler object, gobble it up.
if (_hard_end < second_card_start ||
pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
second_card_start = _hard_end;
}
if (pre_top < second_card_start) {
MemRegion first_card_suffix(pre_top, second_card_start);
fill_region_with_block(first_card_suffix, true);
}
pre_top = second_card_start;
_top = pre_top;
_end = MAX2(_top, _hard_end - AlignmentReserve);
}
// If the lab does not end on a card boundary, we don't want to
// allocate onto that card, since that might lead to concurrent
// allocation and card scanning, which we don't support. So we fill
// the last card with a garbage object.
size_t last_card_index = _bsa->index_for(_hard_end);
HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
if (last_card_start < _hard_end) {
// Ensure enough room to fill with the smallest block
last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
// If the top is already in the last card, don't go back beyond it!
// Or if the remainder is too small for a filler object, gobble it up.
if (_top > last_card_start ||
pointer_delta(last_card_start, _top) < AlignmentReserve) {
last_card_start = _top;
}
if (last_card_start < _hard_end) {
MemRegion last_card_prefix(last_card_start, _hard_end);
fill_region_with_block(last_card_prefix, false);
}
_hard_end = last_card_start;
_end = MAX2(_top, _hard_end - AlignmentReserve);
_true_end = _hard_end;
assert(_end <= _hard_end, "Invariant.");
}
// At this point:
// 1) we had a filler object from the original top to hard_end.
// 2) We've filled in any partial cards at the front and back.
if (pre_top < _hard_end) {
// Now we can reset the _bt to do allocation in the given area.
MemRegion new_filler(pre_top, _hard_end);
fill_region_with_block(new_filler, false);
_top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
// If there's no space left, don't retain.
if (_top >= _end) {
_retained = false;
invalidate();
return;
}
_retained_filler = MemRegion(pre_top, _top);
_bt.set_region(MemRegion(_top, _hard_end));
_bt.initialize_threshold();
assert(_bt.threshold() > _top, "initialize_threshold failed!");
// There may be other reasons for queries into the middle of the
// filler object. When such queries are done in parallel with
// allocation, bad things can happen, if the query involves object
// iteration. So we ensure that such queries do not involve object
// iteration, by putting another filler object on the boundaries of
// such queries. One such is the object spanning a parallel card
// chunk boundary.
// "chunk_boundary" is the address of the first chunk boundary less
// than "hard_end".
HeapWord* chunk_boundary =
(HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
assert(chunk_boundary < _hard_end, "Or else above did not work.");
assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
"Consequence of last card handling above.");
if (_top <= chunk_boundary) {
assert(_true_end == _hard_end, "Invariant.");
while (_top <= chunk_boundary) {
assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
"Consequence of last card handling above.");
_bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
_hard_end = chunk_boundary;
chunk_boundary -= ChunkSizeInWords;
}
_end = _hard_end - AlignmentReserve;
assert(_top <= _end, "Invariant.");
// Now reset the initial filler chunk so it doesn't overlap with
// the one(s) inserted above.
MemRegion new_filler(pre_top, _hard_end);
fill_region_with_block(new_filler, false);
}
} else {
_retained = false;
invalidate();
}
} else {
assert(!end_of_gc ||
(!_retained && _true_end == _hard_end), "Checking.");
}
assert(_end <= _hard_end, "Invariant.");
assert(_top < _end || _top == _hard_end, "Invariant");
}


@@ -216,44 +216,4 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
}
};
class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
BlockOffsetArrayContigSpace _bt;
BlockOffsetSharedArray* _bsa;
HeapWord* _true_end; // end of the whole ParGCAllocBuffer
static const size_t ChunkSizeInWords;
static const size_t ChunkSizeInBytes;
HeapWord* allocate_slow(size_t word_sz);
void fill_region_with_block(MemRegion mr, bool contig);
public:
ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
HeapWord* allocate(size_t word_sz) {
HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
if (res != NULL) {
_bt.alloc_block(res, word_sz);
} else {
res = allocate_slow(word_sz);
}
return res;
}
void undo_allocation(HeapWord* obj, size_t word_sz);
virtual void set_buf(HeapWord* buf_start) {
ParGCAllocBuffer::set_buf(buf_start);
_true_end = _hard_end;
_bt.set_region(MemRegion(buf_start, word_sz()));
_bt.initialize_threshold();
}
virtual void retire(bool end_of_gc, bool retain);
MemRegion range() {
return MemRegion(_top, _true_end);
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP


@@ -251,12 +251,6 @@ public:
// Return the address indicating the start of the region corresponding to
// "index" in "_offset_array".
HeapWord* address_for_index(size_t index) const;
// Return the address "p" incremented by the size of
// a region. This method does not align the address
// returned to the start of a region. It is a simple
// primitive.
HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
};
//////////////////////////////////////////////////////////////////////////


@@ -466,11 +466,6 @@ public:
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
static size_t par_chunk_heapword_alignment() {
return ParGCCardsPerStrideChunk * card_size_in_words;
}
};
class CardTableRS;


@@ -220,12 +220,6 @@ oop Generation::par_promote(int thread_num,
return NULL;
}
void Generation::par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz) {
// Could do a bad general impl here that gets a lock. But no.
guarantee(false, "No good general implementation.");
}
Space* Generation::space_containing(const void* p) const {
GenerationIsInReservedClosure blk(p);
// Cast away const


@@ -317,11 +317,6 @@ class Generation: public CHeapObj<mtGC> {
virtual oop par_promote(int thread_num,
oop obj, markOop m, size_t word_sz);
// Undo, if possible, the most recent par_promote_alloc allocation by
// "thread_num" ("obj", of "word_sz").
virtual void par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz);
// Informs the current generation that all par_promote_alloc's in the
// collection have been completed; any supporting data structures can be
// reset. Default is to do nothing.


@@ -25,7 +25,7 @@
* @test TestParNewSerialOld
* @key gc
* @bug 8065972
- * @summary Test that the deprecated ParNew+SerialOld combination print a warning message
+ * @summary Test that the unsupported ParNew+SerialOld combination does not start
* @library /testlibrary
*/
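
Note: the updated @summary above says the test now expects the ParNew+SerialOld combination to be rejected at startup, but the test body itself is not part of this diff. Below is a minimal sketch, not the actual test source, of how such a jtreg test could check that behaviour. It assumes the ProcessTools and OutputAnalyzer helpers from the JDK test library referenced by @library /testlibrary; the exact flags used and the expected error-message text are assumptions, not content taken from this commit.

/*
 * Hypothetical sketch only -- not the real TestParNewSerialOld body.
 * Assumes the com.oracle.java.testlibrary helpers from /testlibrary;
 * the expected message text is an assumption, not taken from this commit.
 */
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class TestParNewSerialOld {
    public static void main(String[] args) throws Exception {
        // Request ParNew without CMS, which implies the unsupported
        // ParNew (young) + Serial (old) pairing.
        ProcessBuilder pb =
            ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());

        // The VM should refuse to start rather than merely print a deprecation warning.
        output.shouldContain("It is not possible to combine the ParNew young collector with the Serial old collector");
        output.shouldHaveExitValue(1);
    }
}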