8195103: Refactor out card table from CardTableModRefBS to flatten the BarrierSet hierarchy
Reviewed-by: kbarrett, tschatzl
parent ceb48aba9b
commit 0fb7dffb83
32 changed files with 282 additions and 372 deletions
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -75,10 +75,6 @@ public:
   // supports. Caller does not hold the Heap_lock on entry.
   void collect(GCCause::Cause cause);

-  bool card_mark_must_follow_store() const {
-    return true;
-  }
-
   void stop();
   void safepoint_synchronize_begin();
   void safepoint_synchronize_end();

@@ -1573,7 +1573,6 @@ jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
 }

 jint G1CollectedHeap::initialize() {
-  CollectedHeap::pre_initialize();
   os::enable_vtime();

   // Necessary to satisfy locking discipline assertions.

@@ -1272,36 +1272,8 @@ public:
   size_t max_tlab_size() const;
   size_t unsafe_max_tlab_alloc(Thread* ignored) const;

-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided. G1, like CMS, allows this, but should be
-  // ready to provide a compensating write barrier as necessary
-  // if that storage came out of a non-young region. The efficiency
-  // of this implementation depends crucially on being able to
-  // answer very efficiently in constant time whether a piece of
-  // storage in the heap comes from a young region or not.
-  // See ReduceInitialCardMarks.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  virtual bool card_mark_must_follow_store() const {
-    return true;
-  }
-
   inline bool is_in_young(const oop obj);

-  // We don't need barriers for initializing stores to objects
-  // in the young gen: for the SATB pre-barrier, there is no
-  // pre-value that needs to be remembered; for the remembered-set
-  // update logging post-barrier, we don't maintain remembered set
-  // information for young gen objects.
-  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
-
   // Returns "true" iff the given word_size is "very large".
   static bool is_humongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -241,15 +241,6 @@ inline bool G1CollectedHeap::is_in_young(const oop obj) {
   return heap_region_containing(obj)->is_young();
 }

-// We don't need barriers for initializing stores to objects
-// in the young gen: for the SATB pre-barrier, there is no
-// pre-value that needs to be remembered; for the remembered-set
-// update logging post-barrier, we don't maintain remembered set
-// information for young gen objects.
-inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
-  return is_in_young(new_obj);
-}
-
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
   if (obj == NULL) {
     return false;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -131,6 +131,7 @@ G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
 }

 void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  initialize_deferred_card_mark_barriers();
   mapper->set_mapping_changed_listener(&_listener);

   _byte_map_size = mapper->reserved().byte_size();

@@ -213,3 +214,14 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
     }
   }
 }
+
+bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
+  volatile jbyte* p = byte_for((void*)obj);
+  return *p == g1_young_card_val();
+}
+
+void G1SATBCardTableLoggingModRefBS::flush_deferred_barriers(JavaThread* thread) {
+  CardTableModRefBS::flush_deferred_barriers(thread);
+  thread->satb_mark_queue().flush();
+  thread->dirty_card_queue().flush();
+}

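An aside on the is_in_young() test added above: G1 pre-marks the cards of young regions with a dedicated card value, so the query is a single byte-map load rather than a heap-region lookup. Below is a minimal standalone C++ sketch of that mechanism; the card size matches HotSpot's 512-byte cards, but the heap base and the young-card encoding are illustrative stand-ins, and none of this is HotSpot code.

#include <cassert>
#include <cstdint>

const int card_shift = 9;            // 512-byte cards, as in HotSpot
const uint8_t young_card_val = 32;   // stand-in for g1_young_card_val()

struct CardTable {
  uintptr_t heap_base;
  uint8_t byte_map[1024];            // covers 1024 cards = 512 KiB of heap

  uint8_t* byte_for(uintptr_t addr) {            // one byte per card
    return &byte_map[(addr - heap_base) >> card_shift];
  }
  bool is_in_young(uintptr_t obj) {              // single load, no region lookup
    return *byte_for(obj) == young_card_val;
  }
};

int main() {
  CardTable ct = {0x10000, {}};
  uintptr_t obj = ct.heap_base + 5 * 512 + 40;   // an object in the sixth card
  *ct.byte_for(obj) = young_card_val;            // its region was allocated as young
  assert(ct.is_in_young(obj));
  return 0;
}
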
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -92,6 +92,8 @@ public:
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
+
+  virtual bool is_in_young(oop obj) const;
 };

 template<>

@@ -145,13 +147,19 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
   // above no longer applies.
   void invalidate(MemRegion mr);

-  void write_region_work(MemRegion mr) { invalidate(mr); }
+  void write_region(MemRegion mr) { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }

   template <DecoratorSet decorators, typename T>
   void write_ref_field_post(T* field, oop new_val);
   void write_ref_field_post_slow(volatile jbyte* byte);

+  virtual void flush_deferred_barriers(JavaThread* thread);
+
+  virtual bool card_mark_must_follow_store() const {
+    return true;
+  }
+
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -26,7 +26,7 @@
 #include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
+#include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"

@@ -677,3 +677,7 @@ HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
   }
   return min_start;
 }
+
+bool CardTableExtension::is_in_young(oop obj) const {
+  return ParallelScavengeHeap::heap()->is_in_young(obj);
+}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -108,6 +108,13 @@ class CardTableExtension : public CardTableModRefBS {
   }

 #endif // ASSERT
+
+  // ReduceInitialCardMarks support
+  virtual bool is_in_young(oop obj) const;
+
+  virtual bool card_mark_must_follow_store() const {
+    return false;
+  }
 };

 template<>

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -57,8 +57,6 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

 jint ParallelScavengeHeap::initialize() {
-  CollectedHeap::pre_initialize();
-
   const size_t heap_size = _collector_policy->max_heap_byte_size();

   ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

@@ -490,13 +488,6 @@ void ParallelScavengeHeap::resize_all_tlabs() {
   CollectedHeap::resize_all_tlabs();
 }

-bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
-  // We don't need barriers for stores to objects in the
-  // young gen and, a fortiori, for initializing stores to
-  // objects therein.
-  return is_in_young(new_obj);
-}
-
 // This method is used by System.gc() and JVMTI.
 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   assert(!Heap_lock->owned_by_self(),

@@ -719,4 +710,3 @@ GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
   memory_pools.append(_old_pool);
   return memory_pools;
 }
-

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -205,21 +205,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   size_t tlab_used(Thread* thr) const;
   size_t unsafe_max_tlab_alloc(Thread* thr) const;

-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  virtual bool card_mark_must_follow_store() const {
-    return false;
-  }
-
-  // Return true if we don't we need a store barrier for
-  // initializing stores to an object at this address.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj);
-
   void object_iterate(ObjectClosure* cl);
   void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -61,10 +61,6 @@ public:
   virtual bool is_in_closed_subset(const void* p) const {
     return is_in(p);
   }
-
-  virtual bool card_mark_must_follow_store() const {
-    return false;
-  }
 };

 #endif // SHARE_VM_GC_CMS_CMSHEAP_HPP

@@ -32,6 +32,8 @@
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/fakeRttiSupport.hpp"

+class JavaThread;
+
 // This class provides the interface between a barrier implementation and
 // the rest of the system.

@@ -107,18 +109,18 @@ public:
   static void static_write_ref_array_pre(HeapWord* start, size_t count);
   static void static_write_ref_array_post(HeapWord* start, size_t count);

+  // Support for optimizing compilers to call the barrier set on slow path allocations
+  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
+  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
+  // is redone until it succeeds. This can e.g. prevent allocations from the slow path
+  // to be in old.
+  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
+  virtual void flush_deferred_barriers(JavaThread* thread) {}
+  virtual void make_parsable(JavaThread* thread) {}
+
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;

 public:
-  // (For efficiency reasons, this operation is specialized for certain
-  // barrier types. Semantically, it should be thought of as a call to the
-  // virtual "_work" function below, which must implement the barrier.)
-  void write_region(MemRegion mr);
-
-protected:
-  virtual void write_region_work(MemRegion mr) = 0;
-
-public:
   // Inform the BarrierSet that the the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,

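The three hooks added above define the new per-thread protocol: compensate on slow-path allocation, flush thread-local barrier state before GC or at thread exit, and make the heap parsable on demand. Below is a minimal standalone model of a barrier set using these hooks, assuming a simplified JavaThread that records a single deferred region; the names mirror the patch, but this is a sketch, not HotSpot code.

#include <cstdio>

struct MemRegion {
  void* start = nullptr;
  bool is_empty() const { return start == nullptr; }
};

struct JavaThread {
  MemRegion deferred_card_mark;   // region still awaiting its card mark
};

class BarrierSet {
public:
  // Compensate for card marks the compiler elided on a slow-path allocation.
  virtual void on_slowpath_allocation_exit(JavaThread*, void* /*new_obj*/) {}
  // Flush any thread-local barrier state, e.g. before GC or at thread exit.
  virtual void flush_deferred_barriers(JavaThread*) {}
  // Make the heap parsable again, e.g. before heap iteration.
  virtual void make_parsable(JavaThread* t) { flush_deferred_barriers(t); }
  virtual ~BarrierSet() {}
};

class DeferringBarrierSet : public BarrierSet {
public:
  void on_slowpath_allocation_exit(JavaThread* t, void* new_obj) override {
    t->deferred_card_mark.start = new_obj;        // defer the mark
  }
  void flush_deferred_barriers(JavaThread* t) override {
    if (!t->deferred_card_mark.is_empty()) {
      std::printf("card-marking %p\n", t->deferred_card_mark.start);
      t->deferred_card_mark = MemRegion();
    }
  }
};

int main() {
  DeferringBarrierSet bs;
  JavaThread t;
  int obj;
  bs.on_slowpath_allocation_exit(&t, &obj);   // compiled slow path returns
  bs.flush_deferred_barriers(&t);             // e.g. from JavaThread::exit
  return 0;
}
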
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -52,8 +52,4 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   write_ref_array_work(MemRegion(aligned_start, aligned_end));
 }

-inline void BarrierSet::write_region(MemRegion mr) {
-  write_region_work(mr);
-}
-
 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -30,6 +30,7 @@
 #include "logging/log.hpp"
 #include "memory/virtualspace.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/thread.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"

@@ -61,7 +62,8 @@ CardTableModRefBS::CardTableModRefBS(
   _committed(NULL),
   _cur_covered_regions(0),
   _byte_map(NULL),
-  byte_map_base(NULL)
+  byte_map_base(NULL),
+  _defer_initial_card_mark(false)
 {
   assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
   assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

@@ -75,6 +77,7 @@ CardTableModRefBS::CardTableModRefBS(
 }

 void CardTableModRefBS::initialize() {
+  initialize_deferred_card_mark_barriers();
   _guard_index = cards_required(_whole_heap.word_size()) - 1;
   _last_valid_index = _guard_index - 1;

@@ -521,3 +524,112 @@ void CardTableModRefBS::print_on(outputStream* st) const {
   st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
                p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
 }
+
+// Helper for ReduceInitialCardMarks. For performance,
+// compiled code may elide card-marks for initializing stores
+// to a newly allocated object along the fast-path. We
+// compensate for such elided card-marks as follows:
+// (a) Generational, non-concurrent collectors, such as
+//     GenCollectedHeap(ParNew,DefNew,Tenured) and
+//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
+//     need the card-mark if and only if the region is
+//     in the old gen, and do not care if the card-mark
+//     succeeds or precedes the initializing stores themselves,
+//     so long as the card-mark is completed before the next
+//     scavenge. For all these cases, we can do a card mark
+//     at the point at which we do a slow path allocation
+//     in the old gen, i.e. in this call.
+// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
+//     in addition that the card-mark for an old gen allocated
+//     object strictly follow any associated initializing stores.
+//     In these cases, the memRegion remembered below is
+//     used to card-mark the entire region either just before the next
+//     slow-path allocation by this thread or just before the next scavenge or
+//     CMS-associated safepoint, whichever of these events happens first.
+//     (The implicit assumption is that the object has been fully
+//     initialized by this point, a fact that we assert when doing the
+//     card-mark.)
+// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
+//     is used to remember the pre-value of any store. Initializing
+//     stores will not need this barrier, so we need not worry about
+//     compensating for the missing pre-barrier here. Turning now
+//     to the post-barrier, we note that G1 needs a RS update barrier
+//     which simply enqueues a (sequence of) dirty cards which may
+//     optionally be refined by the concurrent update threads. Note
+//     that this barrier need only be applied to a non-young write,
+//     but, like in CMS, because of the presence of concurrent refinement
+//     (much like CMS' precleaning), must strictly follow the oop-store.
+//     Thus, using the same protocol for maintaining the intended
+//     invariants turns out, serendepitously, to be the same for both
+//     G1 and CMS.
+//
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
+  if (!ReduceInitialCardMarks) {
+    return;
+  }
+  // If a previous card-mark was deferred, flush it now.
+  flush_deferred_card_mark_barrier(thread);
+  if (new_obj->is_typeArray() || is_in_young(new_obj)) {
+    // Arrays of non-references don't need a post-barrier.
+    // The deferred_card_mark region should be empty
+    // following the flush above.
+    assert(thread->deferred_card_mark().is_empty(), "Error");
+  } else {
+    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    assert(!mr.is_empty(), "Error");
+    if (_defer_initial_card_mark) {
+      // Defer the card mark
+      thread->set_deferred_card_mark(mr);
+    } else {
+      // Do the card mark
+      write_region(mr);
+    }
+  }
+}
+
+void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
+  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
+  // otherwise remains unused.
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
+                             && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+  assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
+
+void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  MemRegion deferred = thread->deferred_card_mark();
+  if (!deferred.is_empty()) {
+    assert(_defer_initial_card_mark, "Otherwise should be empty");
+    {
+      // Verify that the storage points to a parsable object in heap
+      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
+      assert(!is_in_young(old_obj),
+             "Else should have been filtered in on_slowpath_allocation_exit()");
+      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
+      assert(deferred.word_size() == (size_t)(old_obj->size()),
+             "Mismatch: multiple objects?");
+    }
+    write_region(deferred);
+    // "Clear" the deferred_card_mark field
+    thread->set_deferred_card_mark(MemRegion());
+  }
+  assert(thread->deferred_card_mark().is_empty(), "invariant");
+#else
+  assert(!_defer_initial_card_mark, "Should be false");
+  assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
+}
+
+void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) {
+  // The deferred store barriers must all have been flushed to the
+  // card-table (or other remembered set structure) before GC starts
+  // processing the card-table (or other remembered set).
+  flush_deferred_card_mark_barrier(thread);
+}

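The branch structure of on_slowpath_allocation_exit() above, restated as a standalone sketch. The heap and object queries are reduced to boolean parameters, so this models only the decision logic, not the card-table write itself.

#include <cstdio>

enum class Action { none, mark_now, defer };

// Type arrays and young objects need no compensating mark; otherwise the mark
// happens immediately, or is deferred when the collector requires the mark to
// strictly follow the initializing stores.
Action compensate(bool reduce_initial_card_marks, bool is_type_array,
                  bool is_young, bool defer_initial_card_mark) {
  if (!reduce_initial_card_marks) return Action::none;  // nothing was elided
  if (is_type_array || is_young) return Action::none;   // no refs / no RS info kept
  return defer_initial_card_mark ? Action::defer : Action::mark_now;
}

int main() {
  // Old-gen object array under a CMS-style collector: the mark is deferred.
  Action a = compensate(true, false, false, true);
  std::printf("%s\n", a == Action::defer ? "defer" : "other");
  return 0;
}
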
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -58,6 +58,10 @@ class CardTableModRefBS: public ModRefBarrierSet {
     CT_MR_BS_last_reserved = 16
   };

+  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
+  // or INCLUDE_JVMCI is being used
+  bool _defer_initial_card_mark;
+
   // a word's worth (row) of clean card values
   static const intptr_t clean_card_row = (intptr_t)(-1);

@@ -180,8 +184,8 @@ class CardTableModRefBS: public ModRefBarrierSet {
   CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();

- protected:
-  void write_region_work(MemRegion mr) {
+ public:
+  void write_region(MemRegion mr) {
     dirty_MemRegion(mr);
   }

@@ -314,6 +318,49 @@ class CardTableModRefBS: public ModRefBarrierSet {
   void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

+  // ReduceInitialCardMarks
+  void initialize_deferred_card_mark_barriers();
+
+  // If the CollectedHeap was asked to defer a store barrier above,
+  // this informs it to flush such a deferred store barrier to the
+  // remembered set.
+  void flush_deferred_card_mark_barrier(JavaThread* thread);
+
+  // Can a compiler initialize a new object without store barriers?
+  // This permission only extends from the creation of a new object
+  // via a TLAB up to the first subsequent safepoint. If such permission
+  // is granted for this heap type, the compiler promises to call
+  // defer_store_barrier() below on any slow path allocation of
+  // a new object for which such initializing store barriers will
+  // have been elided. G1, like CMS, allows this, but should be
+  // ready to provide a compensating write barrier as necessary
+  // if that storage came out of a non-young region. The efficiency
+  // of this implementation depends crucially on being able to
+  // answer very efficiently in constant time whether a piece of
+  // storage in the heap comes from a young region or not.
+  // See ReduceInitialCardMarks.
+  virtual bool can_elide_tlab_store_barriers() const {
+    return true;
+  }
+
+  // If a compiler is eliding store barriers for TLAB-allocated objects,
+  // we will be informed of a slow-path allocation by a call
+  // to on_slowpath_allocation_exit() below. Such a call precedes the
+  // initialization of the object itself, and no post-store-barriers will
+  // be issued. Some heap types require that the barrier strictly follows
+  // the initializing stores. (This is currently implemented by deferring the
+  // barrier until the next slow-path allocation or gc-related safepoint.)
+  // This interface answers whether a particular barrier type needs the card
+  // mark to be thus strictly sequenced after the stores.
+  virtual bool card_mark_must_follow_store() const = 0;
+
+  virtual bool is_in_young(oop obj) const = 0;
+
+  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
+  virtual void flush_deferred_barriers(JavaThread* thread);
+
+  virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
+
   template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
 };

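For reference, the two queries documented above feed a single cached decision: _defer_initial_card_mark is computed once in initialize_deferred_card_mark_barriers() (see the .cpp hunk earlier). A sketch of that predicate, with the VM queries and flags replaced by same-named parameters:

// All inputs are stand-ins for the VM queries and flags of the same names.
bool defer_initial_card_mark(bool is_server_compilation_mode_vm,
                             bool ReduceInitialCardMarks,
                             bool can_elide_tlab_store_barriers,
                             bool DeferInitialCardMark,
                             bool card_mark_must_follow_store) {
  return is_server_compilation_mode_vm
      && ReduceInitialCardMarks
      && can_elide_tlab_store_barriers
      && (DeferInitialCardMark || card_mark_must_follow_store);
}

// E.g. a CMS-style barrier set (card_mark_must_follow_store() == true) defers
// the mark; a Parallel-style one (false) card-marks immediately on the slow path.
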
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -121,3 +121,6 @@ void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
   }
 }

+bool CardTableModRefBSForCTRS::is_in_young(oop obj) const {
+  return GenCollectedHeap::heap()->is_in_young(obj);
+}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -43,6 +43,12 @@ public:

   void set_CTRS(CardTableRS* rs) { _rs = rs; }

+  virtual bool card_mark_must_follow_store() const {
+    return UseConcMarkSweepGC;
+  }
+
+  virtual bool is_in_young(oop obj) const;
+
 private:
   CardTableRS* _rs;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -177,8 +177,7 @@ CollectedHeap::CollectedHeap() :
   _total_collections(0),
   _total_full_collections(0),
   _gc_cause(GCCause::_no_gc),
-  _gc_lastcause(GCCause::_no_gc),
-  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+  _gc_lastcause(GCCause::_no_gc)
 {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);

@@ -239,17 +238,6 @@ void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
   BarrierSet::set_bs(barrier_set);
 }

-void CollectedHeap::pre_initialize() {
-  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
-  // otherwise remains unused.
-#if COMPILER2_OR_JVMCI
-  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
-                             && (DeferInitialCardMark || card_mark_must_follow_store());
-#else
-  assert(_defer_initial_card_mark == false, "Who would set it?");
-#endif
-}
-
 #ifndef PRODUCT
 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {

@@ -333,28 +321,6 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
   return obj;
 }

-void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
-  MemRegion deferred = thread->deferred_card_mark();
-  if (!deferred.is_empty()) {
-    assert(_defer_initial_card_mark, "Otherwise should be empty");
-    {
-      // Verify that the storage points to a parsable object in heap
-      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
-      assert(is_in(old_obj), "Not in allocated heap");
-      assert(!can_elide_initializing_store_barrier(old_obj),
-             "Else should have been filtered in new_store_pre_barrier()");
-      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
-      assert(deferred.word_size() == (size_t)(old_obj->size()),
-             "Mismatch: multiple objects?");
-    }
-    BarrierSet* bs = barrier_set();
-    bs->write_region(deferred);
-    // "Clear" the deferred_card_mark field
-    thread->set_deferred_card_mark(MemRegion());
-  }
-  assert(thread->deferred_card_mark().is_empty(), "invariant");
-}
-
 size_t CollectedHeap::max_tlab_size() const {
   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.

@@ -370,72 +336,6 @@ size_t CollectedHeap::max_tlab_size() const {
   return align_down(max_int_size, MinObjAlignment);
 }

-// Helper for ReduceInitialCardMarks. For performance,
-// compiled code may elide card-marks for initializing stores
-// to a newly allocated object along the fast-path. We
-// compensate for such elided card-marks as follows:
-// (a) Generational, non-concurrent collectors, such as
-//     GenCollectedHeap(ParNew,DefNew,Tenured) and
-//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
-//     need the card-mark if and only if the region is
-//     in the old gen, and do not care if the card-mark
-//     succeeds or precedes the initializing stores themselves,
-//     so long as the card-mark is completed before the next
-//     scavenge. For all these cases, we can do a card mark
-//     at the point at which we do a slow path allocation
-//     in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-//     in addition that the card-mark for an old gen allocated
-//     object strictly follow any associated initializing stores.
-//     In these cases, the memRegion remembered below is
-//     used to card-mark the entire region either just before the next
-//     slow-path allocation by this thread or just before the next scavenge or
-//     CMS-associated safepoint, whichever of these events happens first.
-//     (The implicit assumption is that the object has been fully
-//     initialized by this point, a fact that we assert when doing the
-//     card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
-//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
-//     is used to remember the pre-value of any store. Initializing
-//     stores will not need this barrier, so we need not worry about
-//     compensating for the missing pre-barrier here. Turning now
-//     to the post-barrier, we note that G1 needs a RS update barrier
-//     which simply enqueues a (sequence of) dirty cards which may
-//     optionally be refined by the concurrent update threads. Note
-//     that this barrier need only be applied to a non-young write,
-//     but, like in CMS, because of the presence of concurrent refinement
-//     (much like CMS' precleaning), must strictly follow the oop-store.
-//     Thus, using the same protocol for maintaining the intended
-//     invariants turns out, serendepitously, to be the same for both
-//     G1 and CMS.
-//
-// For any future collector, this code should be reexamined with
-// that specific collector in mind, and the documentation above suitably
-// extended and updated.
-oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
-  // If a previous card-mark was deferred, flush it now.
-  flush_deferred_store_barrier(thread);
-  if (can_elide_initializing_store_barrier(new_obj) ||
-      new_obj->is_typeArray()) {
-    // Arrays of non-references don't need a pre-barrier.
-    // The deferred_card_mark region should be empty
-    // following the flush above.
-    assert(thread->deferred_card_mark().is_empty(), "Error");
-  } else {
-    MemRegion mr((HeapWord*)new_obj, new_obj->size());
-    assert(!mr.is_empty(), "Error");
-    if (_defer_initial_card_mark) {
-      // Defer the card mark
-      thread->set_deferred_card_mark(mr);
-    } else {
-      // Do the card mark
-      BarrierSet* bs = barrier_set();
-      bs->write_region(mr);
-    }
-  }
-  return new_obj;
-}
-
 size_t CollectedHeap::filler_array_hdr_size() {
   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 }

@@ -538,24 +438,16 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
   const bool use_tlab = UseTLAB;
-  const bool deferred = _defer_initial_card_mark;
   // The main thread starts allocating via a TLAB even before it
   // has added itself to the threads list at vm boot-up.
   JavaThreadIteratorWithHandle jtiwh;
   assert(!use_tlab || jtiwh.length() > 0,
          "Attempt to fill tlabs before main thread has been added"
          " to threads list is doomed to failure!");
+  BarrierSet *bs = barrier_set();
   for (; JavaThread *thread = jtiwh.next(); ) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
-#if COMPILER2_OR_JVMCI
-    // The deferred store barriers must all have been flushed to the
-    // card-table (or other remembered set structure) before GC starts
-    // processing the card-table (or other remembered set).
-    if (deferred) flush_deferred_store_barrier(thread);
-#else
-    assert(!deferred, "Should be false");
-    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
-#endif
+    bs->make_parsable(thread);
   }
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -101,10 +101,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {

   GCHeapLog* _gc_heap_log;

-  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
-  // or INCLUDE_JVMCI is being used
-  bool _defer_initial_card_mark;
-
   MemRegion _reserved;

 protected:

@@ -129,13 +125,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Constructor
   CollectedHeap();

-  // Do common initializations that must follow instance construction,
-  // for example, those needing virtual calls.
-  // This code could perhaps be moved into initialize() but would
-  // be slightly more awkward because we want the latter to be a
-  // pure virtual.
-  void pre_initialize();
-
   // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);

@@ -408,45 +397,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
     return 0;
   }

-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided.
-  virtual bool can_elide_tlab_store_barriers() const = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // there is probably a corresponding slow path which can produce
-  // an object allocated anywhere. The compiler's runtime support
-  // promises to call this function on such a slow-path-allocated
-  // object before performing initializations that have elided
-  // store barriers. Returns new_obj, or maybe a safer copy thereof.
-  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
-
-  // Answers whether an initializing store to a new object currently
-  // allocated at the given address doesn't need a store
-  // barrier. Returns "true" if it doesn't need an initializing
-  // store barrier; answers "false" if it does.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // we will be informed of a slow-path allocation by a call
-  // to new_store_pre_barrier() above. Such a call precedes the
-  // initialization of the object itself, and no post-store-barriers will
-  // be issued. Some heap types require that the barrier strictly follows
-  // the initializing stores. (This is currently implemented by deferring the
-  // barrier until the next slow-path allocation or gc-related safepoint.)
-  // This interface answers whether a particular heap type needs the card
-  // mark to be thus strictly sequenced after the stores.
-  virtual bool card_mark_must_follow_store() const = 0;
-
-  // If the CollectedHeap was asked to defer a store barrier above,
-  // this informs it to flush such a deferred store barrier to the
-  // remembered set.
-  virtual void flush_deferred_store_barrier(JavaThread* thread);
-
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -71,8 +71,6 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
 }

 jint GenCollectedHeap::initialize() {
-  CollectedHeap::pre_initialize();
-
   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
   // system which believe this to be true (e.g. oop->object_size in some

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -270,22 +270,6 @@ public:
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
   virtual HeapWord* allocate_new_tlab(size_t size);

-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  // We don't need barriers for stores to objects in the
-  // young gen and, a fortiori, for initializing stores to
-  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
-  // only and may need to be re-examined in case other
-  // kinds of collectors are implemented in the future.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    return is_in_young(new_obj);
-  }
-
   // The "requestor" generation is performing some garbage collection
   // action for which it would be useful to have scratch space. The
   // requestor promises to allocate no more than "max_alloc_words" in any

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -45,6 +45,7 @@ public:

   // Causes all refs in "mr" to be assumed to be modified.
   virtual void invalidate(MemRegion mr) = 0;
+  virtual void write_region(MemRegion mr) = 0;

   // The caller guarantees that "mr" contains no references. (Perhaps it's
   // objects have been moved elsewhere.)

@@ -116,10 +116,7 @@ JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance(JavaThread* thread, Klass* klass))
   oop obj = ik->allocate_instance(CHECK);
   thread->set_vm_result(obj);
   JRT_BLOCK_END;
-
-  if (ReduceInitialCardMarks) {
-    new_store_pre_barrier(thread);
-  }
+  SharedRuntime::on_slowpath_allocation_exit(thread);
 JRT_END

 JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_klass, jint length))

@@ -151,29 +148,9 @@ JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_klass, jint length))
     }
   }
   JRT_BLOCK_END;
-
-  if (ReduceInitialCardMarks) {
-    new_store_pre_barrier(thread);
-  }
+  SharedRuntime::on_slowpath_allocation_exit(thread);
 JRT_END

-void JVMCIRuntime::new_store_pre_barrier(JavaThread* thread) {
-  // After any safepoint, just before going back to compiled code,
-  // we inform the GC that we will be doing initializing writes to
-  // this object in the future without emitting card-marks, so
-  // GC may take any compensating steps.
-  // NOTE: Keep this code consistent with GraphKit::store_barrier.
-
-  oop new_obj = thread->vm_result();
-  if (new_obj == NULL) return;
-
-  assert(Universe::heap()->can_elide_tlab_store_barriers(),
-         "compiler must check this first");
-  // GC may decide to give back a safer copy of new_obj.
-  new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
-  thread->set_vm_result(new_obj);
-}
-
 JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
   assert(klass->is_klass(), "not a class");
   assert(rank >= 1, "rank must be nonzero");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -154,7 +154,6 @@ class JVMCIRuntime: public AllStatic {
   static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
   static void write_barrier_post(JavaThread* thread, void* card);
   static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
-  static void new_store_pre_barrier(JavaThread* thread);

   // used to throw exceptions from compiled JVMCI code
   static void throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message);

@@ -3861,7 +3861,7 @@ void GraphKit::write_barrier_post(Node* oop_store,
   if (use_ReduceInitialCardMarks()
       && obj == just_allocated_object(control())) {
     // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
     // That routine informs GC to take appropriate compensating steps,
     // upon a slow-path allocation, so as to make this card-mark
     // elision safe.

@@ -4159,7 +4159,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
  * as part of the allocation in the case the allocated object is not located
  * in the nursery, this would happen for humongous objects. This is similar to
  * how CMS is required to handle this case, see the comments for the method
- * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
+ * CardTableModRefBS::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier.
  * A deferred card mark is required for these objects and handled in the above
  * mentioned methods.
  *

@@ -4249,7 +4249,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,

   if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
     // We can skip marks on a freshly-allocated object in Eden.
-    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
     // That routine informs GC to take appropriate compensating steps,
     // upon a slow-path allocation, so as to make this card-mark
     // elision safe.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -755,8 +755,10 @@ class GraphKit : public Phase {
   Node* just_allocated_object(Node* current_control);

   static bool use_ReduceInitialCardMarks() {
-    return (ReduceInitialCardMarks
-            && Universe::heap()->can_elide_tlab_store_barriers());
+    BarrierSet *bs = Universe::heap()->barrier_set();
+    return bs->is_a(BarrierSet::CardTableModRef)
+           && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
+           && ReduceInitialCardMarks;
   }

   // Sync Ideal and Graph kits.

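The new use_ReduceInitialCardMarks() asks the barrier set rather than the heap, using the is_a()/barrier_set_cast<> pattern. Below is a standalone model of that dispatch, with HotSpot's FakeRtti tag reduced to a plain enum; it is a sketch, not the real class hierarchy.

#include <cassert>

class BarrierSet {
public:
  enum Name { CardTableModRef, Other };   // stand-in for the FakeRtti tag
  explicit BarrierSet(Name n) : _name(n) {}
  bool is_a(Name n) const { return _name == n; }
  virtual ~BarrierSet() {}
private:
  Name _name;
};

class CardTableModRefBS : public BarrierSet {
public:
  CardTableModRefBS() : BarrierSet(CardTableModRef) {}
  bool can_elide_tlab_store_barriers() const { return true; }
};

// In HotSpot this cast is validated against the barrier set's RTTI tag.
template <typename T>
T* barrier_set_cast(BarrierSet* bs) { return static_cast<T*>(bs); }

bool use_ReduceInitialCardMarks(BarrierSet* bs, bool reduce_initial_card_marks) {
  return bs->is_a(BarrierSet::CardTableModRef)
      && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
      && reduce_initial_card_marks;
}

int main() {
  CardTableModRefBS ct;
  assert(use_ReduceInitialCardMarks(&ct, true));
  return 0;
}
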
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -194,23 +194,6 @@ const char* OptoRuntime::stub_name(address entry) {
 // We failed the fast-path allocation. Now we need to do a scavenge or GC
 // and try allocation again.

-void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
-  // After any safepoint, just before going back to compiled code,
-  // we inform the GC that we will be doing initializing writes to
-  // this object in the future without emitting card-marks, so
-  // GC may take any compensating steps.
-  // NOTE: Keep this code consistent with GraphKit::store_barrier.
-
-  oop new_obj = thread->vm_result();
-  if (new_obj == NULL) return;
-
-  assert(Universe::heap()->can_elide_tlab_store_barriers(),
-         "compiler must check this first");
-  // GC may decide to give back a safer copy of new_obj.
-  new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
-  thread->set_vm_result(new_obj);
-}
-
 // object allocation
 JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
   JRT_BLOCK;

@@ -244,10 +227,8 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
   deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
   JRT_BLOCK_END;

-  if (GraphKit::use_ReduceInitialCardMarks()) {
-    // inform GC that we won't do card marks for initializing writes.
-    new_store_pre_barrier(thread);
-  }
+  // inform GC that we won't do card marks for initializing writes.
+  SharedRuntime::on_slowpath_allocation_exit(thread);
 JRT_END

@@ -284,10 +265,8 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread *thread))
   thread->set_vm_result(result);
   JRT_BLOCK_END;

-  if (GraphKit::use_ReduceInitialCardMarks()) {
-    // inform GC that we won't do card marks for initializing writes.
-    new_store_pre_barrier(thread);
-  }
+  // inform GC that we won't do card marks for initializing writes.
+  SharedRuntime::on_slowpath_allocation_exit(thread);
 JRT_END

 // array allocation without zeroing

@@ -314,10 +293,9 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread *thread))
   thread->set_vm_result(result);
   JRT_BLOCK_END;

-  if (GraphKit::use_ReduceInitialCardMarks()) {
-    // inform GC that we won't do card marks for initializing writes.
-    new_store_pre_barrier(thread);
-  }
+  // inform GC that we won't do card marks for initializing writes.
+  SharedRuntime::on_slowpath_allocation_exit(thread);

   oop result = thread->vm_result();
   if ((len > 0) && (result != NULL) &&

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -163,10 +163,6 @@ class OptoRuntime : public AllStatic {
   static void new_array_C(Klass* array_klass, int len, JavaThread *thread);
   static void new_array_nozero_C(Klass* array_klass, int len, JavaThread *thread);

-  // Post-slow-path-allocation, pre-initializing-stores step for
-  // implementing ReduceInitialCardMarks
-  static void new_store_pre_barrier(JavaThread* thread);
-
   // Allocate storage for a multi-dimensional arrays
   // Note: needs to be fixed for arbitrary number of dimensions
   static void multianewarray2_C(Klass* klass, int len1, int len2, JavaThread *thread);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -3169,3 +3169,16 @@ frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
   }
   return activation;
 }
+
+void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
+  // After any safepoint, just before going back to compiled code,
+  // we inform the GC that we will be doing initializing writes to
+  // this object in the future without emitting card-marks, so
+  // GC may take any compensating steps.
+
+  oop new_obj = thread->vm_result();
+  if (new_obj == NULL) return;
+
+  BarrierSet *bs = Universe::heap()->barrier_set();
+  bs->on_slowpath_allocation_exit(thread, new_obj);
+}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -211,6 +211,10 @@ class SharedRuntime: AllStatic {
   static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason);
 #endif

+  // Post-slow-path-allocation, pre-initializing-stores step for
+  // implementing e.g. ReduceInitialCardMarks
+  static void on_slowpath_allocation_exit(JavaThread* thread);
+
   static void enable_stack_reserved_zone(JavaThread* thread);
   static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr);

@@ -1994,20 +1994,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
     JvmtiExport::cleanup_thread(this);
   }

-  // We must flush any deferred card marks before removing a thread from
-  // the list of active threads.
-  Universe::heap()->flush_deferred_store_barrier(this);
-  assert(deferred_card_mark().is_empty(), "Should have been flushed");
-
-#if INCLUDE_ALL_GCS
-  // We must flush the G1-related buffers before removing a thread
-  // from the list of active threads. We must do this after any deferred
-  // card marks have been flushed (above) so that any entries that are
-  // added to the thread's dirty card queue as a result are not lost.
-  if (UseG1GC) {
-    flush_barrier_queues();
-  }
-#endif // INCLUDE_ALL_GCS
+  // We must flush any deferred card marks and other various GC barrier
+  // related buffers (e.g. G1 SATB buffer and G1 dirty card queue buffer)
+  // before removing a thread from the list of active threads.
+  BarrierSet::barrier_set()->flush_deferred_barriers(this);

   log_info(os, thread)("JavaThread %s (tid: " UINTX_FORMAT ").",
     exit_type == JavaThread::normal_exit ? "exiting" : "detaching",

@@ -466,6 +466,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
   nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
   nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
   \
+  nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \
   nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
   nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
   nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \

@@ -482,7 +483,6 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
   \
   nonstatic_field(CollectedHeap, _reserved, MemRegion) \
   nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
-  nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \
   nonstatic_field(CollectedHeap, _is_gc_active, bool) \
   nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
   \