7023069: G1: Introduce symmetric locking in the slow allocation path
7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads
7018286: G1: humongous allocation attempts should take the GC locker into account

First, this change replaces the asymmetric locking scheme in the G1 slow allocation path with a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future.

Reviewed-by: stefank, brutisso, johnc
Parent: 349d820dd1
Commit: 3e9fe24ddd
11 changed files with 920 additions and 747 deletions
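To make the refactoring easier to follow, here is a small, self-contained sketch of the symmetric two-level protocol the commit introduces: every thread first tries a lock-free CAS bump allocation in the active region, and only on failure takes a lock, retries, and replaces the region. This is not HotSpot code; the Region and AllocRegion types, the fixed capacity, and the helper names are invented for the illustration.

#include <atomic>
#include <cstddef>
#include <mutex>

// Toy stand-in for a HeapRegion: a bump-pointer region with a CAS fast path.
struct Region {
  static constexpr std::size_t capacity = 1024;    // words, arbitrary
  std::atomic<std::size_t> top{0};

  // Lock-free allocation attempt (models HeapRegion::par_allocate()).
  void* par_allocate(std::size_t words) {
    std::size_t old_top = top.load();
    do {
      if (old_top + words > capacity) {
        return nullptr;                            // region is (nearly) full
      }
    } while (!top.compare_exchange_weak(old_top, old_top + words));
    return reinterpret_cast<void*>(old_top + 1);   // fake non-null "address"
  }
};

// Toy stand-in for G1AllocRegion: symmetric two-level allocation.
class AllocRegion {
  std::atomic<Region*> _current;
  std::mutex _lock;                                // plays the role of Heap_lock

  Region* allocate_new_region() { return new Region(); }  // always succeeds here

 public:
  AllocRegion() : _current(new Region()) {}

  // First level: called by every thread without holding the lock.
  void* attempt_allocation(std::size_t words) {
    return _current.load()->par_allocate(words);
  }

  // Second level: called after the first level failed; redo the CAS attempt
  // under the lock (another thread may have installed a fresh region), then
  // retire the full region and install a new one.
  void* attempt_allocation_locked(std::size_t words) {
    std::lock_guard<std::mutex> guard(_lock);
    if (void* p = _current.load()->par_allocate(words)) {
      return p;
    }
    Region* fresh = allocate_new_region();         // the old region leaks in this toy
    _current.store(fresh);
    return fresh->par_allocate(words);
  }
};

The point of the symmetry is that mutator threads and (eventually) GC threads follow the same two calls rather than special-casing who holds which lock.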
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp (new file, 208 lines)
@@ -0,0 +1,208 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
                                            bool bot_updates) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that noone else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // closed to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then noone can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
}

void G1AllocRegion::retire(bool fill_up) {
  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    // We never have to check whether the active region is empty or not,
    // and potentially free it if it is, given that it's guaranteed that
    // it will never be empty.
    assert(!alloc_region->is_empty(),
           ar_ext_msg(this, "the alloc region should never be empty"));

    if (fill_up) {
      fill_up_remaining_space(alloc_region, _bot_updates);
    }

    assert(alloc_region->used() >= _used_bytes_before,
           ar_ext_msg(this, "invariant"));
    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
    retire_region(alloc_region, allocated_bytes);
    _used_bytes_before = 0;
    _alloc_region = _dummy_region;
  }
  trace("retired");
}

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
  assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
    assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded"));

    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    _alloc_region = new_alloc_region;
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}

void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
  msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
              _name, message, BOOL_TO_STR(_bot_updates),
              _alloc_region, _used_bytes_before);
}

void G1AllocRegion::init() {
  trace("initializing");
  assert(_alloc_region == NULL && _used_bytes_before == 0,
         ar_ext_msg(this, "pre-condition"));
  assert(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  trace("initialized");
}

HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#if G1_ALLOC_REGION_TRACING
void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
  // All the calls to trace that set either just the size or the size
  // and the result are considered part of level 2 tracing and are
  // skipped during level 1 tracing.
  if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
    const size_t buffer_length = 128;
    char hr_buffer[buffer_length];
    char rest_buffer[buffer_length];

    HeapRegion* alloc_region = _alloc_region;
    if (alloc_region == NULL) {
      jio_snprintf(hr_buffer, buffer_length, "NULL");
    } else if (alloc_region == _dummy_region) {
      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
    } else {
      jio_snprintf(hr_buffer, buffer_length,
                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
    }

    if (G1_ALLOC_REGION_TRACING > 1) {
      if (result != NULL) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
                     word_size, result);
      } else if (word_size != 0) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
      } else {
        jio_snprintf(rest_buffer, buffer_length, "");
      }
    } else {
      jio_snprintf(rest_buffer, buffer_length, "");
    }

    tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
  }
}
#endif // G1_ALLOC_REGION_TRACING

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _used_bytes_before(0) { }
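A key detail in fill_up_remaining_space() above: because other threads may still CAS-allocate out of a region that is being retired without holding any lock, the retiring thread seals the region by repeatedly attempting a maximal allocation of all remaining free space, then formatting that space as a dummy object. Below is a hedged, self-contained model of that sealing loop; the capacity, the minimum fill size, and the single atomic bump pointer are simplified placeholders, not HotSpot code.

#include <atomic>
#include <cstddef>

// Model of the "seal the region" step: the retiring thread tries to claim
// *all* remaining free words in one CAS so that concurrent lock-free
// allocators can no longer succeed in this region.
constexpr std::size_t kCapacity = 1024;  // region size in words (arbitrary)
constexpr std::size_t kMinFill  = 2;     // stand-in for CollectedHeap::min_fill_size()

std::atomic<std::size_t> region_top{0};  // bump pointer of the region being retired

void fill_up_remaining_space() {
  std::size_t free_words = kCapacity - region_top.load();
  while (free_words >= kMinFill) {
    std::size_t old_top = region_top.load();
    std::size_t want    = kCapacity - old_top;       // maximal allocation
    if (want < kMinFill) break;                      // too small to matter anyway
    if (region_top.compare_exchange_strong(old_top, old_top + want)) {
      // In the real code the claimed space is formatted as a dummy object
      // via CollectedHeap::fill_with_object(dummy, want).
      break;
    }
    // A concurrent allocator won the race; re-read and try again. Each failed
    // attempt means the region got closer to full, so the loop terminates.
    free_words = kCapacity - region_top.load();
  }
  // Post-condition: any remaining free space is below kMinFill, so no
  // allocation request can be satisfied from this region any more.
}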
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp (new file, 174 lines)
@@ -0,0 +1,174 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP

#include "gc_implementation/g1/heapRegion.hpp"

class G1CollectedHeap;

// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
#define G1_ALLOC_REGION_TRACING 0

class ar_ext_msg;

// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.

class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
  friend class ar_ext_msg;

private:
  // The active allocating region we are currently allocating out
  // of. The invariant is that if this object is initialized (i.e.,
  // init() has been called and release() has not) then _alloc_region
  // is either an active allocating region or the dummy region (i.e.,
  // it can never be NULL) and this object can be used to satisfy
  // allocation requests. If this object is not initialized
  // (i.e. init() has not been called or release() has been called)
  // then _alloc_region is NULL and this object should not be used to
  // satisfy allocation requests (it was done this way to force the
  // correct use of init() and release()).
  HeapRegion* _alloc_region;

  // When we set up a new active region we save its used bytes in this
  // field so that, when we retire it, we can calculate how much space
  // we allocated in it.
  size_t _used_bytes_before;

  // Specifies whether the allocate calls will do BOT updates or not.
  bool _bot_updates;

  // Useful for debugging and tracing.
  const char* _name;

  // A dummy region (i.e., it's been allocated specially for this
  // purpose and it is not part of the heap) that is full (i.e., top()
  // == end()). When we don't have a valid active region we make
  // _alloc_region point to this. This allows us to skip checking
  // whether the _alloc_region is NULL or not.
  static HeapRegion* _dummy_region;

  // Some of the methods below take a bot_updates parameter. Its value
  // should be the same as the _bot_updates field. The idea is that
  // the parameter will be a constant for a particular alloc region
  // and, given that these methods will be hopefully inlined, the
  // compiler should compile out the test.

  // Perform a non-MT-safe allocation out of the given region.
  static inline HeapWord* allocate(HeapRegion* alloc_region,
                                   size_t word_size,
                                   bool bot_updates);

  // Perform a MT-safe allocation out of the given region.
  static inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                       size_t word_size,
                                       bool bot_updates);

  // Ensure that the region passed as a parameter has been filled up
  // so that noone else can allocate out of it any more.
  static void fill_up_remaining_space(HeapRegion* alloc_region,
                                      bool bot_updates);

  // Retire the active allocating region. If fill_up is true then make
  // sure that the region is full before we retire it so that noone
  // else can allocate out of it.
  void retire(bool fill_up);

  // Allocate a new active region and use it to perform a word_size
  // allocation. The force parameter will be passed on to
  // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
  // to allocate a new region even if the max has been reached.
  HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

  void fill_in_ext_msg(ar_ext_msg* msg, const char* message);

protected:
  // For convenience as subclasses use it.
  static G1CollectedHeap* _g1h;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) = 0;

  G1AllocRegion(const char* name, bool bot_updates);

public:
  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);

  HeapRegion* get() const {
    // Make sure that the dummy region does not escape this class.
    return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
  }

  // The following two are the building blocks for the allocation method.

  // First-level allocation: Should be called without holding a
  // lock. It will try to allocate lock-free out of the active region,
  // or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates);

  // Second-level allocation: Should be called while holding a
  // lock. It will try to first allocate lock-free out of the active
  // region or, if it's unable to, it will try to replace the active
  // alloc region with a new one. We require that the caller takes the
  // appropriate lock before calling this so that it is easier to make
  // it conform to its locking protocol.
  inline HeapWord* attempt_allocation_locked(size_t word_size,
                                             bool bot_updates);

  // Should be called to allocate a new region even if the max of this
  // type of regions has been reached. Should only be called if other
  // allocation attempts have failed and we are not holding a valid
  // active region.
  inline HeapWord* attempt_allocation_force(size_t word_size,
                                            bool bot_updates);

  // Should be called before we start using this object.
  void init();

  // Should be called when we want to release the active region which
  // is returned after it's been retired.
  HeapRegion* release();

#if G1_ALLOC_REGION_TRACING
  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
#else // G1_ALLOC_REGION_TRACING
  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { }
#endif // G1_ALLOC_REGION_TRACING
};

class ar_ext_msg : public err_msg {
public:
  ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") {
    alloc_region->fill_in_ext_msg(this, message);
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
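One design point worth highlighting from the class comment above: when the object is initialized but no real region is installed, _alloc_region points at a shared dummy region that is permanently full, so the lock-free fast path simply fails instead of having to test for NULL on every allocation. The following minimal, self-contained illustration of that sentinel idea uses invented names and types; it is a sketch of the design choice, not the HotSpot implementation.

#include <cstddef>
#include <cstdio>

struct Region {
  size_t top;
  size_t end;                                   // top == end means "full"
  // Returns a fake non-null "address" on success, nullptr when full.
  void* allocate(size_t words) {
    if (top + words > end) return nullptr;
    top += words;
    return reinterpret_cast<void*>(top);        // placeholder value only
  }
};

// A single, shared sentinel that is permanently full: every allocation
// attempt against it fails naturally, with no NULL check on the fast path.
static Region dummy_region = {0, 0};

struct AllocRegionHandle {                      // toy model of G1AllocRegion
  Region* current = &dummy_region;              // never NULL once initialized

  void* attempt_allocation(size_t words) {
    // No "current == NULL" branch needed: the dummy region rejects everything.
    return current->allocate(words);
  }
};

int main() {
  AllocRegionHandle h;
  std::printf("%p\n", h.attempt_allocation(8)); // prints a null pointer
}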
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp (new file, 106 lines)
@@ -0,0 +1,106 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP

#include "gc_implementation/g1/g1AllocRegion.hpp"

inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
                                         size_t word_size,
                                         bool bot_updates) {
  assert(alloc_region != NULL, err_msg("pre-condition"));

  if (!bot_updates) {
    return alloc_region->allocate_no_bot_updates(word_size);
  } else {
    return alloc_region->allocate(word_size);
  }
}

inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
                                             size_t word_size,
                                             bool bot_updates) {
  assert(alloc_region != NULL, err_msg("pre-condition"));
  assert(!alloc_region->is_empty(), err_msg("pre-condition"));

  if (!bot_updates) {
    return alloc_region->par_allocate_no_bot_updates(word_size);
  } else {
    return alloc_region->par_allocate(word_size);
  }
}

inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
                                                   bool bot_updates) {
  assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));

  HeapRegion* alloc_region = _alloc_region;
  assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));

  HeapWord* result = par_allocate(alloc_region, word_size, bot_updates);
  if (result != NULL) {
    trace("alloc", word_size, result);
    return result;
  }
  trace("alloc failed", word_size);
  return NULL;
}

inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
                                                          bool bot_updates) {
  // First we have to redo the allocation, assuming we're holding the
  // appropriate lock, in case another thread changed the region while
  // we were waiting to get the lock.
  HeapWord* result = attempt_allocation(word_size, bot_updates);
  if (result != NULL) {
    return result;
  }

  retire(true /* fill_up */);
  result = new_alloc_region_and_allocate(word_size, false /* force */);
  if (result != NULL) {
    trace("alloc locked (second attempt)", word_size, result);
    return result;
  }
  trace("alloc locked failed", word_size);
  return NULL;
}

inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
                                                         bool bot_updates) {
  assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));

  trace("forcing alloc");
  HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
  if (result != NULL) {
    trace("alloc forced", word_size, result);
    return result;
  }
  trace("alloc forced failed", word_size);
  return NULL;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
+#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
@@ -517,8 +518,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
   return NULL;
 }
 
-HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
-                                             bool do_expand) {
+HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   assert(!isHumongous(word_size) ||
          word_size <= (size_t) HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
@@ -566,7 +566,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
                                                  size_t word_size) {
   HeapRegion* alloc_region = NULL;
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
-    alloc_region = new_region_work(word_size, true /* do_expand */);
+    alloc_region = new_region(word_size, true /* do_expand */);
     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
       alloc_region->set_survivor();
     }
@@ -587,7 +587,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expasion if this fails, so
     // let's not try to expand here too.
-    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
+    HeapRegion* hr = new_region(word_size, false /* do_expand */);
     if (hr != NULL) {
       first = hr->hrs_index();
     } else {
@@ -788,407 +788,12 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   return result;
 }
 
-void
-G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
-  // Other threads might still be trying to allocate using CASes out
-  // of the region we are retiring, as they can do so without holding
-  // the Heap_lock. So we first have to make sure that noone else can
-  // allocate in it by doing a maximal allocation. Even if our CAS
-  // attempt fails a few times, we'll succeed sooner or later given
-  // that a failed CAS attempt mean that the region is getting closed
-  // to being full (someone else succeeded in allocating into it).
-  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
-
-  // This is the minimum free chunk we can turn into a dummy
-  // object. If the free space falls below this, then noone can
-  // allocate in this region anyway (all allocation requests will be
-  // of a size larger than this) so we won't have to perform the dummy
-  // allocation.
-  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
-
-  while (free_word_size >= min_word_size_to_fill) {
-    HeapWord* dummy =
-      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
-    if (dummy != NULL) {
-      // If the allocation was successful we should fill in the space.
-      CollectedHeap::fill_with_object(dummy, free_word_size);
-      break;
-    }
-
-    free_word_size = cur_alloc_region->free() / HeapWordSize;
-    // It's also possible that someone else beats us to the
-    // allocation and they fill up the region. In that case, we can
-    // just get out of the loop
-  }
-  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
-         "sanity");
-
-  retire_cur_alloc_region_common(cur_alloc_region);
-  assert(_cur_alloc_region == NULL, "post-condition");
-}
-
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
-HeapWord*
-G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
-                                                       bool at_safepoint,
-                                                       bool do_dirtying,
-                                                       bool can_expand) {
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(_cur_alloc_region == NULL,
-         "replace_cur_alloc_region_and_allocate() should only be called "
-         "after retiring the previous current alloc region");
-  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
-         "at_safepoint and is_at_safepoint() should be a tautology");
-  assert(!can_expand || g1_policy()->can_expand_young_list(),
-         "we should not call this method with can_expand == true if "
-         "we are not allowed to expand the young gen");
-
-  if (can_expand || !g1_policy()->is_young_list_full()) {
-    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
-    if (new_cur_alloc_region != NULL) {
-      assert(new_cur_alloc_region->is_empty(),
-             "the newly-allocated region should be empty, "
-             "as right now we only allocate new regions out of the free list");
-      g1_policy()->update_region_num(true /* next_is_young */);
-      set_region_short_lived_locked(new_cur_alloc_region);
-
-      assert(!new_cur_alloc_region->isHumongous(),
-             "Catch a regression of this bug.");
-
-      // We need to ensure that the stores to _cur_alloc_region and,
-      // subsequently, to top do not float above the setting of the
-      // young type.
-      OrderAccess::storestore();
-
-      // Now, perform the allocation out of the region we just
-      // allocated. Note that noone else can access that region at
-      // this point (as _cur_alloc_region has not been updated yet),
-      // so we can just go ahead and do the allocation without any
-      // atomics (and we expect this allocation attempt to
-      // suceeded). Given that other threads can attempt an allocation
-      // with a CAS and without needing the Heap_lock, if we assigned
-      // the new region to _cur_alloc_region before first allocating
-      // into it other threads might have filled up the new region
-      // before we got a chance to do the allocation ourselves. In
-      // that case, we would have needed to retire the region, grab a
-      // new one, and go through all this again. Allocating out of the
-      // new region before assigning it to _cur_alloc_region avoids
-      // all this.
-      HeapWord* result =
-        new_cur_alloc_region->allocate_no_bot_updates(word_size);
-      assert(result != NULL, "we just allocate out of an empty region "
-             "so allocation should have been successful");
-      assert(is_in(result), "result should be in the heap");
-
-      // Now make sure that the store to _cur_alloc_region does not
-      // float above the store to top.
-      OrderAccess::storestore();
-      _cur_alloc_region = new_cur_alloc_region;
-
-      if (!at_safepoint) {
-        Heap_lock->unlock();
-      }
-
-      // do the dirtying, if necessary, after we release the Heap_lock
-      if (do_dirtying) {
-        dirty_young_block(result, word_size);
-      }
-      return result;
-    }
-  }
-
-  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
-         "alloc region, it should still be NULL");
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  return NULL;
-}
-
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
-HeapWord*
-G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
-         "used for humongous allocations");
-
-  // We should only reach here when we were unable to allocate
-  // otherwise. So, we should have not active current alloc region.
-  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
-
-  // We will loop while succeeded is false, which means that we tried
-  // to do a collection, but the VM op did not succeed. So, when we
-  // exit the loop, either one of the allocation attempts was
-  // successful, or we succeeded in doing the VM op but which was
-  // unable to allocate after the collection.
-  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
-    bool succeeded = true;
-
-    // Every time we go round the loop we should be holding the Heap_lock.
-    assert_heap_locked();
-
-    if (GC_locker::is_active_and_needs_gc()) {
-      // We are locked out of GC because of the GC locker. We can
-      // allocate a new region only if we can expand the young gen.
-
-      if (g1_policy()->can_expand_young_list()) {
-        // Yes, we are allowed to expand the young gen. Let's try to
-        // allocate a new current alloc region.
-        HeapWord* result =
-          replace_cur_alloc_region_and_allocate(word_size,
-                                                false, /* at_safepoint */
-                                                true,  /* do_dirtying */
-                                                true   /* can_expand */);
-        if (result != NULL) {
-          assert_heap_not_locked();
-          return result;
-        }
-      }
-      // We could not expand the young gen further (or we could but we
-      // failed to allocate a new region). We'll stall until the GC
-      // locker forces a GC.
-
-      // If this thread is not in a jni critical section, we stall
-      // the requestor until the critical section has cleared and
-      // GC allowed. When the critical section clears, a GC is
-      // initiated by the last thread exiting the critical section; so
-      // we retry the allocation sequence from the beginning of the loop,
-      // rather than causing more, now probably unnecessary, GC attempts.
-      JavaThread* jthr = JavaThread::current();
-      assert(jthr != NULL, "sanity");
-      if (jthr->in_critical()) {
-        if (CheckJNICalls) {
-          fatal("Possible deadlock due to allocating while"
-                " in jni critical section");
-        }
-        // We are returning NULL so the protocol is that we're still
-        // holding the Heap_lock.
-        assert_heap_locked();
-        return NULL;
-      }
-
-      Heap_lock->unlock();
-      GC_locker::stall_until_clear();
-
-      // No need to relock the Heap_lock. We'll fall off to the code
-      // below the else-statement which assumes that we are not
-      // holding the Heap_lock.
-    } else {
-      // We are not locked out. So, let's try to do a GC. The VM op
-      // will retry the allocation before it completes.
-
-      // Read the GC count while holding the Heap_lock
-      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-
-      Heap_lock->unlock();
-
-      HeapWord* result =
-        do_collection_pause(word_size, gc_count_before, &succeeded);
-      assert_heap_not_locked();
-      if (result != NULL) {
-        assert(succeeded, "the VM op should have succeeded");
-
-        // Allocations that take place on VM operations do not do any
-        // card dirtying and we have to do it here.
-        dirty_young_block(result, word_size);
-        return result;
-      }
-    }
-
-    // Both paths that get us here from above unlock the Heap_lock.
-    assert_heap_not_locked();
-
-    // We can reach here when we were unsuccessful in doing a GC,
-    // because another thread beat us to it, or because we were locked
-    // out of GC due to the GC locker. In either case a new alloc
-    // region might be available so we will retry the allocation.
-    HeapWord* result = attempt_allocation(word_size);
-    if (result != NULL) {
-      assert_heap_not_locked();
-      return result;
-    }
-
-    // So far our attempts to allocate failed. The only time we'll go
-    // around the loop and try again is if we tried to do a GC and the
-    // VM op that we tried to schedule was not successful because
-    // another thread beat us to it. If that happened it's possible
-    // that by the time we grabbed the Heap_lock again and tried to
-    // allocate other threads filled up the young generation, which
-    // means that the allocation attempt after the GC also failed. So,
-    // it's worth trying to schedule another GC pause.
-    if (succeeded) {
-      break;
-    }
-
-    // Give a warning if we seem to be looping forever.
-    if ((QueuedAllocationWarningCount > 0) &&
-        (try_count % QueuedAllocationWarningCount == 0)) {
-      warning("G1CollectedHeap::attempt_allocation_slow() "
-              "retries %d times", try_count);
-    }
-  }
-
-  assert_heap_locked();
-  return NULL;
-}
-
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
-HeapWord*
-G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                              bool at_safepoint) {
-  // This is the method that will allocate a humongous object. All
-  // allocation paths that attempt to allocate a humongous object
-  // should eventually reach here. Currently, the only paths are from
-  // mem_allocate() and attempt_allocation_at_safepoint().
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(isHumongous(word_size), "attempt_allocation_humongous() "
-         "should only be used for humongous allocations");
-  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
-         "at_safepoint and is_at_safepoint() should be a tautology");
-
-  HeapWord* result = NULL;
-
-  // We will loop while succeeded is false, which means that we tried
-  // to do a collection, but the VM op did not succeed. So, when we
-  // exit the loop, either one of the allocation attempts was
-  // successful, or we succeeded in doing the VM op but which was
-  // unable to allocate after the collection.
-  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
-    bool succeeded = true;
-
-    // Given that humongous objects are not allocated in young
-    // regions, we'll first try to do the allocation without doing a
-    // collection hoping that there's enough space in the heap.
-    result = humongous_obj_allocate(word_size);
-    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
-           "catch a regression of this bug.");
-    if (result != NULL) {
-      if (!at_safepoint) {
-        // If we're not at a safepoint, unlock the Heap_lock.
-        Heap_lock->unlock();
-      }
-      return result;
-    }
-
-    // If we failed to allocate the humongous object, we should try to
-    // do a collection pause (if we're allowed) in case it reclaims
-    // enough space for the allocation to succeed after the pause.
-    if (!at_safepoint) {
-      // Read the GC count while holding the Heap_lock
-      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-
-      // If we're allowed to do a collection we're not at a
-      // safepoint, so it is safe to unlock the Heap_lock.
-      Heap_lock->unlock();
-
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
-      assert_heap_not_locked();
-      if (result != NULL) {
-        assert(succeeded, "the VM op should have succeeded");
-        return result;
-      }
-
-      // If we get here, the VM operation either did not succeed
-      // (i.e., another thread beat us to it) or it succeeded but
-      // failed to allocate the object.
-
-      // If we're allowed to do a collection we're not at a
-      // safepoint, so it is safe to lock the Heap_lock.
-      Heap_lock->lock();
-    }
-
-    assert(result == NULL, "otherwise we should have exited the loop earlier");
-
-    // So far our attempts to allocate failed. The only time we'll go
-    // around the loop and try again is if we tried to do a GC and the
-    // VM op that we tried to schedule was not successful because
-    // another thread beat us to it. That way it's possible that some
-    // space was freed up by the thread that successfully scheduled a
-    // GC. So it's worth trying to allocate again.
-    if (succeeded) {
-      break;
-    }
-
-    // Give a warning if we seem to be looping forever.
-    if ((QueuedAllocationWarningCount > 0) &&
-        (try_count % QueuedAllocationWarningCount == 0)) {
-      warning("G1CollectedHeap::attempt_allocation_humongous "
-              "retries %d times", try_count);
-    }
-  }
-
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  return NULL;
-}
-
-HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
-                                           bool expect_null_cur_alloc_region) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
-         err_msg("the current alloc region was unexpectedly found "
-                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
-                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
-                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));
-
-  if (!isHumongous(word_size)) {
-    if (!expect_null_cur_alloc_region) {
-      HeapRegion* cur_alloc_region = _cur_alloc_region;
-      if (cur_alloc_region != NULL) {
-        // We are at a safepoint so no reason to use the MT-safe version.
-        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
-        if (result != NULL) {
-          assert(is_in(result), "result should be in the heap");
-
-          // We will not do any dirtying here. This is guaranteed to be
-          // called during a safepoint and the thread that scheduled the
-          // pause will do the dirtying if we return a non-NULL result.
-          return result;
-        }
-
-        retire_cur_alloc_region_common(cur_alloc_region);
-      }
-    }
-
-    assert(_cur_alloc_region == NULL,
-           "at this point we should have no cur alloc region");
-    return replace_cur_alloc_region_and_allocate(word_size,
-                                                 true, /* at_safepoint */
-                                                 false /* do_dirtying */,
-                                                 false /* can_expand */);
-  } else {
-    return attempt_allocation_humongous(word_size,
-                                        true /* at_safepoint */);
-  }
-
-  ShouldNotReachHere();
-}
-
 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
-
-  // First attempt: Try allocating out of the current alloc region
-  // using a CAS. If that fails, take the Heap_lock and retry the
-  // allocation, potentially replacing the current alloc region.
-  HeapWord* result = attempt_allocation(word_size);
-  if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
-  }
-
-  // Second attempt: Go to the slower path where we might try to
-  // schedule a collection.
-  result = attempt_allocation_slow(word_size);
-  if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
-  }
-
-  assert_heap_locked();
-  // Need to unlock the Heap_lock before returning.
-  Heap_lock->unlock();
-  return NULL;
+  assert(!isHumongous(word_size), "we do not allow humongous TLABs");
+
+  unsigned int dummy_gc_count_before;
+  return attempt_allocation(word_size, &dummy_gc_count_before);
 }
 
 HeapWord*
@@ -1200,48 +805,18 @@ G1CollectedHeap::mem_allocate(size_t word_size,
   assert(!is_tlab, "mem_allocate() this should not be called directly "
          "to allocate TLABs");
 
-  // Loop until the allocation is satisified,
-  // or unsatisfied after GC.
+  // Loop until the allocation is satisified, or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
-    {
-      if (!isHumongous(word_size)) {
-        // First attempt: Try allocating out of the current alloc region
-        // using a CAS. If that fails, take the Heap_lock and retry the
-        // allocation, potentially replacing the current alloc region.
-        HeapWord* result = attempt_allocation(word_size);
-        if (result != NULL) {
-          assert_heap_not_locked();
-          return result;
-        }
-
-        assert_heap_locked();
-
-        // Second attempt: Go to the slower path where we might try to
-        // schedule a collection.
-        result = attempt_allocation_slow(word_size);
-        if (result != NULL) {
-          assert_heap_not_locked();
-          return result;
-        }
-      } else {
-        // attempt_allocation_humongous() requires the Heap_lock to be held.
-        Heap_lock->lock();
-
-        HeapWord* result = attempt_allocation_humongous(word_size,
-                                                     false /* at_safepoint */);
-        if (result != NULL) {
-          assert_heap_not_locked();
-          return result;
-        }
-      }
-
-      assert_heap_locked();
-      // Read the gc count while the heap lock is held.
-      gc_count_before = SharedHeap::heap()->total_collections();
-
-      // Release the Heap_lock before attempting the collection.
-      Heap_lock->unlock();
-    }
+
+    HeapWord* result = NULL;
+    if (!isHumongous(word_size)) {
+      result = attempt_allocation(word_size, &gc_count_before);
+    } else {
+      result = attempt_allocation_humongous(word_size, &gc_count_before);
+    }
+    if (result != NULL) {
+      return result;
+    }
 
     // Create the garbage collection operation...
@@ -1249,7 +824,6 @@ G1CollectedHeap::mem_allocate(size_t word_size,
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);
 
-    assert_heap_not_locked();
     if (op.prologue_succeeded() && op.pause_succeeded()) {
       // If the operation was successful we'll return the result even
       // if it is NULL. If the allocation attempt failed immediately
@ -1275,21 +849,207 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
||||||
}
|
}
|
||||||
|
|
||||||
ShouldNotReachHere();
|
ShouldNotReachHere();
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1CollectedHeap::abandon_cur_alloc_region() {
|
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
unsigned int *gc_count_before_ret) {
|
||||||
|
// Make sure you read the note in attempt_allocation_humongous().
|
||||||
|
|
||||||
HeapRegion* cur_alloc_region = _cur_alloc_region;
|
assert_heap_not_locked_and_not_at_safepoint();
|
||||||
if (cur_alloc_region != NULL) {
|
assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
|
||||||
assert(!cur_alloc_region->is_empty(),
|
"be called for humongous allocation requests");
|
||||||
"the current alloc region can never be empty");
|
|
||||||
assert(cur_alloc_region->is_young(),
|
|
||||||
"the current alloc region should be young");
|
|
||||||
|
|
||||||
retire_cur_alloc_region_common(cur_alloc_region);
|
// We should only get here after the first-level allocation attempt
|
||||||
|
// (attempt_allocation()) failed to allocate.
|
||||||
|
|
||||||
|
// We will loop until a) we manage to successfully perform the
|
||||||
|
// allocation or b) we successfully schedule a collection which
|
||||||
|
// fails to perform the allocation. b) is the only case when we'll
|
||||||
|
// return NULL.
|
||||||
|
HeapWord* result = NULL;
|
||||||
|
for (int try_count = 1; /* we'll return */; try_count += 1) {
|
||||||
|
bool should_try_gc;
|
||||||
|
unsigned int gc_count_before;
|
||||||
|
|
||||||
|
{
|
||||||
|
MutexLockerEx x(Heap_lock);
|
||||||
|
|
||||||
|
result = _mutator_alloc_region.attempt_allocation_locked(word_size,
|
||||||
|
false /* bot_updates */);
|
||||||
|
if (result != NULL) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we reach here, attempt_allocation_locked() above failed to
|
||||||
|
// allocate a new region. So the mutator alloc region should be NULL.
|
||||||
|
assert(_mutator_alloc_region.get() == NULL, "only way to get here");
|
||||||
|
|
||||||
|
if (GC_locker::is_active_and_needs_gc()) {
|
||||||
|
if (g1_policy()->can_expand_young_list()) {
|
||||||
|
result = _mutator_alloc_region.attempt_allocation_force(word_size,
|
||||||
|
false /* bot_updates */);
|
||||||
|
if (result != NULL) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
should_try_gc = false;
|
||||||
|
} else {
|
||||||
|
// Read the GC count while still holding the Heap_lock.
|
||||||
|
gc_count_before = SharedHeap::heap()->total_collections();
|
||||||
|
should_try_gc = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (should_try_gc) {
|
||||||
|
bool succeeded;
|
||||||
|
result = do_collection_pause(word_size, gc_count_before, &succeeded);
|
||||||
|
if (result != NULL) {
|
||||||
|
assert(succeeded, "only way to get back a non-NULL result");
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (succeeded) {
|
||||||
|
// If we get here we successfully scheduled a collection which
|
||||||
|
// failed to allocate. No point in trying to allocate
|
||||||
|
// further. We'll just return NULL.
|
||||||
|
MutexLockerEx x(Heap_lock);
|
||||||
|
*gc_count_before_ret = SharedHeap::heap()->total_collections();
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
GC_locker::stall_until_clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can reach here if we were unsuccessul in scheduling a
|
||||||
|
// collection (because another thread beat us to it) or if we were
|
||||||
|
// stalled due to the GC locker. In either can we should retry the
|
||||||
|
// allocation attempt in case another thread successfully
|
||||||
|
// performed a collection and reclaimed enough space. We do the
|
||||||
|
// first attempt (without holding the Heap_lock) here and the
|
||||||
|
// follow-on attempt will be at the start of the next loop
|
||||||
|
// iteration (after taking the Heap_lock).
|
||||||
|
result = _mutator_alloc_region.attempt_allocation(word_size,
|
||||||
|
false /* bot_updates */);
|
||||||
|
if (result != NULL ){
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Give a warning if we seem to be looping forever.
|
||||||
|
if ((QueuedAllocationWarningCount > 0) &&
|
||||||
|
(try_count % QueuedAllocationWarningCount == 0)) {
|
||||||
|
warning("G1CollectedHeap::attempt_allocation_slow() "
|
||||||
|
"retries %d times", try_count);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
assert(_cur_alloc_region == NULL, "post-condition");
|
|
||||||
|
ShouldNotReachHere();
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
+HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
+                                          unsigned int* gc_count_before_ret) {
+  // The structure of this method has a lot of similarities to
+  // attempt_allocation_slow(). The reason these two were not merged
+  // into a single one is that such a method would require several "if
+  // allocation is not humongous do this, otherwise do that"
+  // conditional paths which would obscure its flow. In fact, an early
+  // version of this code did use a unified method which was harder to
+  // follow and, as a result, it had subtle bugs that were hard to
+  // track down. So keeping these two methods separate allows each to
+  // be more readable. It will be good to keep these two in sync as
+  // much as possible.
+
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(isHumongous(word_size), "attempt_allocation_humongous() "
+         "should only be called for humongous allocations");
+
+  // We will loop until a) we manage to successfully perform the
+  // allocation or b) we successfully schedule a collection which
+  // fails to perform the allocation. b) is the only case when we'll
+  // return NULL.
+  HeapWord* result = NULL;
+  for (int try_count = 1; /* we'll return */; try_count += 1) {
+    bool should_try_gc;
+    unsigned int gc_count_before;
+
+    {
+      MutexLockerEx x(Heap_lock);
+
+      // Given that humongous objects are not allocated in young
+      // regions, we'll first try to do the allocation without doing a
+      // collection hoping that there's enough space in the heap.
+      result = humongous_obj_allocate(word_size);
+      if (result != NULL) {
+        return result;
+      }
+
+      if (GC_locker::is_active_and_needs_gc()) {
+        should_try_gc = false;
+      } else {
+        // Read the GC count while still holding the Heap_lock.
+        gc_count_before = SharedHeap::heap()->total_collections();
+        should_try_gc = true;
+      }
+    }
+
+    if (should_try_gc) {
+      // If we failed to allocate the humongous object, we should try to
+      // do a collection pause (if we're allowed) in case it reclaims
+      // enough space for the allocation to succeed after the pause.
+
+      bool succeeded;
+      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      if (result != NULL) {
+        assert(succeeded, "only way to get back a non-NULL result");
+        return result;
+      }
+
+      if (succeeded) {
+        // If we get here we successfully scheduled a collection which
+        // failed to allocate. No point in trying to allocate
+        // further. We'll just return NULL.
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        return NULL;
+      }
+    } else {
+      GC_locker::stall_until_clear();
+    }
+
+    // We can reach here if we were unsuccessful in scheduling a
+    // collection (because another thread beat us to it) or if we were
+    // stalled due to the GC locker. In either case we should retry the
+    // allocation attempt in case another thread successfully
+    // performed a collection and reclaimed enough space. Give a
+    // warning if we seem to be looping forever.
+
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::attempt_allocation_humongous() "
+              "retries %d times", try_count);
+    }
+  }
+
+  ShouldNotReachHere();
+  return NULL;
+}
+HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
+                                       bool expect_null_mutator_alloc_region) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_mutator_alloc_region.get() == NULL ||
+         !expect_null_mutator_alloc_region,
+         "the current alloc region was unexpectedly found to be non-NULL");
+
+  if (!isHumongous(word_size)) {
+    return _mutator_alloc_region.attempt_allocation_locked(word_size,
+                                                      false /* bot_updates */);
+  } else {
+    return humongous_obj_allocate(word_size);
+  }
+
+  ShouldNotReachHere();
+}
 
 void G1CollectedHeap::abandon_gc_alloc_regions() {
@@ -1417,8 +1177,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    prepare_for_verify();
     gclog_or_tty->print(" VerifyBeforeGC:");
+    prepare_for_verify();
     Universe::verify(true);
   }
 
@@ -1439,9 +1199,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   concurrent_mark()->abort();
 
   // Make sure we'll choose a new allocation region afterwards.
-  abandon_cur_alloc_region();
+  release_mutator_alloc_region();
   abandon_gc_alloc_regions();
-  assert(_cur_alloc_region == NULL, "Invariant.");
   g1_rem_set()->cleanupHRRS();
   tear_down_region_lists();
 
@@ -1547,6 +1306,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   // evacuation pause.
   clear_cset_fast_test();
 
+  init_mutator_alloc_region();
+
   double end = os::elapsedTime();
   g1_policy()->record_full_collection_end();
@@ -1720,8 +1481,9 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
 
   *succeeded = true;
   // Let's attempt the allocation first.
-  HeapWord* result = attempt_allocation_at_safepoint(word_size,
-                                     false /* expect_null_cur_alloc_region */);
+  HeapWord* result =
+    attempt_allocation_at_safepoint(word_size,
+                                 false /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
 
@@ -1748,7 +1510,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
 
   // Retry the allocation
   result = attempt_allocation_at_safepoint(word_size,
-                                      true /* expect_null_cur_alloc_region */);
+                                  true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
 
@@ -1765,7 +1527,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
 
   // Retry the allocation once more
   result = attempt_allocation_at_safepoint(word_size,
-                                      true /* expect_null_cur_alloc_region */);
+                                  true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
 
@@ -1796,7 +1558,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   if (expand(expand_bytes)) {
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
-                                     false /* expect_null_cur_alloc_region */);
+                                 false /* expect_null_mutator_alloc_region */);
   }
   return NULL;
 }
@@ -1940,7 +1702,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
   _cg1r(NULL), _summary_bytes_used(0),
-  _cur_alloc_region(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
   _free_list("Master Free List"),
 
@@ -2099,7 +1860,6 @@ jint G1CollectedHeap::initialize() {
   _g1_max_committed = _g1_committed;
   _hrs = new HeapRegionSeq(_expansion_regions);
   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
-  guarantee(_cur_alloc_region == NULL, "from constructor");
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
 
@@ -2195,6 +1955,22 @@ jint G1CollectedHeap::initialize() {
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
 
+  // Here we allocate the dummy full region that is required by the
+  // G1AllocRegion class. If we don't pass an address in the reserved
+  // space here, lots of asserts fire.
+  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
+  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
+  // We'll re-use the same region whether the alloc region will
+  // require BOT updates or not and, if it doesn't, then a non-young
+  // region will complain that it cannot support allocations without
+  // BOT updates. So we'll tag the dummy region as young to avoid that.
+  dummy_region->set_young();
+  // Make sure it's full.
+  dummy_region->set_top(dummy_region->end());
+  G1AllocRegion::setup(this, dummy_region);
+
+  init_mutator_alloc_region();
+
   return JNI_OK;
 }
@@ -2261,7 +2037,7 @@ size_t G1CollectedHeap::used() const {
          "Should be owned on this thread's behalf.");
   size_t result = _summary_bytes_used;
   // Read only once in case it is set to NULL concurrently
-  HeapRegion* hr = _cur_alloc_region;
+  HeapRegion* hr = _mutator_alloc_region.get();
   if (hr != NULL)
     result += hr->used();
   return result;
 
@@ -2324,13 +2100,11 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
   // to be a problem in the optimized build, since the two loads of the
   // current allocation region field are optimized away.
-  HeapRegion* car = _cur_alloc_region;
-
-  // FIXME: should iterate over all regions?
-  if (car == NULL) {
+  HeapRegion* hr = _mutator_alloc_region.get();
+  if (hr == NULL) {
     return 0;
   }
-  return car->free();
+  return hr->free();
 }
 
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
 
@@ -2781,16 +2555,12 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   // since we can't allow tlabs to grow big enough to accomodate
   // humongous objects.
 
-  // We need to store the cur alloc region locally, since it might change
-  // between when we test for NULL and when we use it later.
-  ContiguousSpace* cur_alloc_space = _cur_alloc_region;
+  HeapRegion* hr = _mutator_alloc_region.get();
   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
-  if (cur_alloc_space == NULL) {
+  if (hr == NULL) {
     return max_tlab_size;
   } else {
-    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
-                max_tlab_size);
+    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
   }
 }
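The clamp in unsafe_max_tlab_alloc() above is easy to misread: the TLAB size is driven by the free space left in the current mutator alloc region, but never drops below MinTLABSize and never reaches the humongous threshold (a humongous TLAB would defeat region-based allocation). A minimal standalone sketch of that clamp, using made-up byte values rather than the real HotSpot constants:

// Illustrative only; the constants are assumptions, not HotSpot's values.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t clamp_tlab_bytes(size_t region_free_bytes,
                               size_t min_tlab_bytes,
                               size_t humongous_threshold_bytes) {
  // Equivalent to MIN2(MAX2(hr->free(), MinTLABSize), max_tlab_size).
  return std::min(std::max(region_free_bytes, min_tlab_bytes),
                  humongous_threshold_bytes);
}

int main() {
  const size_t min_tlab   = 2 * 1024;        // assumed MinTLABSize
  const size_t humongous  = 512 * 1024;      // assumed humongous threshold
  printf("%zu\n", clamp_tlab_bytes(256, min_tlab, humongous));          // 2048
  printf("%zu\n", clamp_tlab_bytes(100 * 1024, min_tlab, humongous));   // 102400
  printf("%zu\n", clamp_tlab_bytes(4 * 1024 * 1024, min_tlab, humongous)); // 524288
  return 0;
}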
@@ -3364,6 +3134,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   }
 
   verify_region_sets_optional();
+  verify_dirty_young_regions();
 
   {
     // This call will decide whether this pause is an initial-mark
 
@@ -3425,8 +3196,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
       HandleMark hm;  // Discard invalid handles created during verification
-      prepare_for_verify();
       gclog_or_tty->print(" VerifyBeforeGC:");
+      prepare_for_verify();
       Universe::verify(false);
     }
 
@@ -3442,7 +3213,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
 
     // Forget the current alloc region (we might even choose it to be part
     // of the collection set!).
-    abandon_cur_alloc_region();
+    release_mutator_alloc_region();
 
     // The elapsed time induced by the start time below deliberately elides
     // the possible verification above.
 
@@ -3573,6 +3344,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
+      init_mutator_alloc_region();
+
       double end_time_sec = os::elapsedTime();
       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
       g1_policy()->record_pause_time_ms(pause_time_ms);
 
@@ -3655,6 +3428,15 @@ size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
   return gclab_word_size;
 }
 
+void G1CollectedHeap::init_mutator_alloc_region() {
+  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+  _mutator_alloc_region.init();
+}
+
+void G1CollectedHeap::release_mutator_alloc_region() {
+  _mutator_alloc_region.release();
+  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
@@ -5140,10 +4922,8 @@ class G1VerifyCardTableCleanup: public HeapRegionClosure {
   CardTableModRefBS* _ct_bs;
 public:
   G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
-    : _ct_bs(ct_bs)
-  { }
-  virtual bool doHeapRegion(HeapRegion* r)
-  {
+    : _ct_bs(ct_bs) { }
+  virtual bool doHeapRegion(HeapRegion* r) {
     MemRegion mr(r->bottom(), r->end());
     if (r->is_survivor()) {
       _ct_bs->verify_dirty_region(mr);
 
@@ -5153,6 +4933,29 @@ public:
     return false;
   }
 };
 
+void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
+  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
+    // We cannot guarantee that [bottom(),end()] is dirty. Threads
+    // dirty allocated blocks as they allocate them. The thread that
+    // retires each region and replaces it with a new one will do a
+    // maximal allocation to fill in [pre_dummy_top(),end()] but will
+    // not dirty that area (one less thing to have to do while holding
+    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
+    // is dirty. Also note that verify_dirty_region() requires
+    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
+    // is not guaranteed to be.
+    MemRegion mr(hr->bottom(),
+                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
+    ct_bs->verify_dirty_region(mr);
+  }
+}
+
+void G1CollectedHeap::verify_dirty_young_regions() {
+  verify_dirty_young_list(_young_list->first_region());
+  verify_dirty_young_list(_young_list->first_survivor_region());
+}
 #endif
 
 void G1CollectedHeap::cleanUpCardTable() {
@@ -5500,6 +5303,44 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   }
 }
 
+HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
+                                                      bool force) {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+  assert(!force || g1_policy()->can_expand_young_list(),
+         "if force is true we should be able to expand the young list");
+  if (force || !g1_policy()->is_young_list_full()) {
+    HeapRegion* new_alloc_region = new_region(word_size,
+                                              false /* do_expand */);
+    if (new_alloc_region != NULL) {
+      g1_policy()->update_region_num(true /* next_is_young */);
+      set_region_short_lived_locked(new_alloc_region);
+      return new_alloc_region;
+    }
+  }
+  return NULL;
+}
+
+void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
+                                                  size_t allocated_bytes) {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+  assert(alloc_region->is_young(), "all mutator alloc regions should be young");
+
+  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
+  _summary_bytes_used += allocated_bytes;
+}
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+// Heap region set verification
+
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HumongousRegionSet* _humongous_set;
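new_mutator_alloc_region() and retire_mutator_alloc_region() above are the two "callbacks" that MutatorAllocRegion forwards to; the G1AllocRegion base class calls them when it needs a fresh region or has finished with the current one. A simplified, self-contained model of that callback pattern is sketched below; the class and member names are illustrative stand-ins, not the actual G1AllocRegion implementation.

// Toy model of the alloc-region callback pattern (assumption: simplified
// single-threaded version; real G1 regions and policies are elided).
#include <cstddef>
#include <cstdio>
#include <vector>

struct Region {
  static const size_t capacity = 1024;   // assumed region size in bytes
  size_t top;                            // bump pointer
  Region() : top(0) { }
  // Serial bump-pointer allocation; returns false when the region is full.
  bool alloc(size_t bytes) {
    if (capacity - top < bytes) return false;
    top += bytes;
    return true;
  }
};

class AllocRegionBase {
protected:
  Region* _current;
  // The two callbacks: a subclass decides where replacement regions come
  // from and what bookkeeping happens when a full region is retired.
  virtual Region* allocate_new_region() = 0;
  virtual void retire_region(Region* r, size_t allocated_bytes) = 0;
public:
  AllocRegionBase() : _current(NULL) { }
  virtual ~AllocRegionBase() { }
  bool attempt_allocation_locked(size_t bytes) {
    if (_current != NULL && _current->alloc(bytes)) return true;
    if (_current != NULL) {
      retire_region(_current, _current->top);   // e.g. add to the incremental cset
      _current = NULL;
    }
    _current = allocate_new_region();           // e.g. take a region off the free list
    return _current != NULL && _current->alloc(bytes);
  }
};

class MutatorLikeAllocRegion : public AllocRegionBase {
  std::vector<Region> _pool;
  size_t _next, _retired_bytes;
protected:
  virtual Region* allocate_new_region() {
    return _next < _pool.size() ? &_pool[_next++] : NULL;
  }
  virtual void retire_region(Region*, size_t allocated_bytes) {
    _retired_bytes += allocated_bytes;           // mirrors _summary_bytes_used
  }
public:
  MutatorLikeAllocRegion() : _pool(4), _next(0), _retired_bytes(0) { }
  size_t retired_bytes() const { return _retired_bytes; }
};

int main() {
  MutatorLikeAllocRegion ar;
  for (int i = 0; i < 10; i++) ar.attempt_allocation_locked(300);
  printf("retired bytes so far: %zu\n", ar.retired_bytes());
  return 0;
}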
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 
@@ -128,6 +129,15 @@ public:
   void print();
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
 
@@ -135,6 +145,7 @@ class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
+  friend class MutatorAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
 
@@ -197,12 +208,15 @@ private:
   // The sequence of all heap regions in the heap.
   HeapRegionSeq* _hrs;
 
-  // The region from which normal-sized objects are currently being
-  // allocated. May be NULL.
-  HeapRegion* _cur_alloc_region;
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // It resets the mutator alloc region before new allocations can take place.
+  void init_mutator_alloc_region();
+
+  // It releases the mutator alloc region.
+  void release_mutator_alloc_region();
 
-  // Postcondition: cur_alloc_region == NULL.
-  void abandon_cur_alloc_region();
   void abandon_gc_alloc_regions();
 
   // The to-space memory regions into which objects are being copied during
@@ -360,27 +374,21 @@ protected:
   G1CollectorPolicy* _g1_policy;
 
   // This is the second level of trying to allocate a new region. If
-  // new_region_work didn't find a region in the free_list, this call
-  // will check whether there's anything available in the
-  // secondary_free_list and/or wait for more regions to appear in that
-  // list, if _free_regions_coming is set.
+  // new_region() didn't find a region on the free_list, this call will
+  // check whether there's anything available on the
+  // secondary_free_list and/or wait for more regions to appear on
+  // that list, if _free_regions_coming is set.
   HeapRegion* new_region_try_secondary_free_list();
 
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
   // request.
-  HeapRegion* new_region_work(size_t word_size, bool do_expand);
+  HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by a
-  // mutator thread. Attempt to expand the heap if no region is
-  // available.
-  HeapRegion* new_alloc_region(size_t word_size) {
-    return new_region_work(word_size, false /* do_expand */);
-  }
-
-  // Try to allocate a new region to be used for allocation by a GC
-  // thread. Attempt to expand the heap if no region is available.
+  // Try to allocate a new region to be used for allocation by
+  // a GC thread. It will try to expand the heap if no region is
+  // available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
 
   // Attempt to satisfy a humongous allocation request of the given
 
@@ -415,10 +423,6 @@ protected:
   // * All non-TLAB allocation requests should go to mem_allocate()
   //   and mem_allocate() should never be called with is_tlab == true.
   //
-  // * If the GC locker is active we currently stall until we can
-  //   allocate a new young region. This will be changed in the
-  //   near future (see CR 6994056).
-  //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
   //   this fails, they will attempt to do an evacuation pause and
@@ -441,122 +445,38 @@ protected:
                              bool  is_tlab, /* expected to be false */
                              bool* gc_overhead_limit_was_exceeded);
 
-  // The following methods, allocate_from_cur_allocation_region(),
-  // attempt_allocation(), attempt_allocation_locked(),
-  // replace_cur_alloc_region_and_allocate(),
-  // attempt_allocation_slow(), and attempt_allocation_humongous()
-  // have very awkward pre- and post-conditions with respect to
-  // locking:
-  //
-  // If they are called outside a safepoint they assume the caller
-  // holds the Heap_lock when it calls them. However, on exit they
-  // will release the Heap_lock if they return a non-NULL result, but
-  // keep holding the Heap_lock if they return a NULL result. The
-  // reason for this is that we need to dirty the cards that span
-  // allocated blocks on young regions to avoid having to take the
-  // slow path of the write barrier (for performance reasons we don't
-  // update RSets for references whose source is a young region, so we
-  // don't need to look at dirty cards on young regions). But, doing
-  // this card dirtying while holding the Heap_lock can be a
-  // scalability bottleneck, especially given that some allocation
-  // requests might be of non-trivial size (and the larger the region
-  // size is, the fewer allocations requests will be considered
-  // humongous, as the humongous size limit is a fraction of the
-  // region size). So, when one of these calls succeeds in allocating
-  // a block it does the card dirtying after it releases the Heap_lock
-  // which is why it will return without holding it.
-  //
-  // The above assymetry is the reason why locking / unlocking is done
-  // explicitly (i.e., with Heap_lock->lock() and
-  // Heap_lock->unlocked()) instead of using MutexLocker and
-  // MutexUnlocker objects. The latter would ensure that the lock is
-  // unlocked / re-locked at every possible exit out of the basic
-  // block. However, we only want that action to happen in selected
-  // places.
-  //
-  // Further, if the above methods are called during a safepoint, then
-  // naturally there's no assumption about the Heap_lock being held or
-  // there's no attempt to unlock it. The parameter at_safepoint
-  // indicates whether the call is made during a safepoint or not (as
-  // an optimization, to avoid reading the global flag with
-  // SafepointSynchronize::is_at_safepoint()).
-  //
-  // The methods share these parameters:
-  //
-  // * word_size     : the size of the allocation request in words
-  // * at_safepoint  : whether the call is done at a safepoint; this
-  //   also determines whether a GC is permitted
-  //   (at_safepoint == false) or not (at_safepoint == true)
-  // * do_dirtying   : whether the method should dirty the allocated
-  //   block before returning
-  //
-  // They all return either the address of the block, if they
-  // successfully manage to allocate it, or NULL.
+  // The following three methods take a gc_count_before_ret
+  // parameter which is used to return the GC count if the method
+  // returns NULL. Given that we are required to read the GC count
+  // while holding the Heap_lock, and these paths will take the
+  // Heap_lock at some point, it's easier to get them to read the GC
+  // count while holding the Heap_lock before they return NULL instead
+  // of the caller (namely: mem_allocate()) having to also take the
+  // Heap_lock just to read the GC count.
 
-  // It tries to satisfy an allocation request out of the current
-  // alloc region, which is passed as a parameter. It assumes that the
-  // caller has checked that the current alloc region is not NULL.
-  // Given that the caller has to check the current alloc region for
-  // at least NULL, it might as well pass it as the first parameter so
-  // that the method doesn't have to read it from the
-  // _cur_alloc_region field again. It is called from both
-  // attempt_allocation() and attempt_allocation_locked() and the
-  // with_heap_lock parameter indicates whether the caller was holding
-  // the heap lock when it called it or not.
-  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                  size_t word_size,
-                                                  bool with_heap_lock);
+  // First-level mutator allocation attempt: try to allocate out of
+  // the mutator alloc region without taking the Heap_lock. This
+  // should only be used for non-humongous allocations.
+  inline HeapWord* attempt_allocation(size_t word_size,
+                                      unsigned int* gc_count_before_ret);
 
-  // First-level of allocation slow path: it attempts to allocate out
-  // of the current alloc region in a lock-free manner using a CAS. If
-  // that fails it takes the Heap_lock and calls
-  // attempt_allocation_locked() for the second-level slow path.
-  inline HeapWord* attempt_allocation(size_t word_size);
+  // Second-level mutator allocation attempt: take the Heap_lock and
+  // retry the allocation attempt, potentially scheduling a GC
+  // pause. This should only be used for non-humongous allocations.
+  HeapWord* attempt_allocation_slow(size_t word_size,
+                                    unsigned int* gc_count_before_ret);
 
-  // Second-level of allocation slow path: while holding the Heap_lock
-  // it tries to allocate out of the current alloc region and, if that
-  // fails, tries to allocate out of a new current alloc region.
-  inline HeapWord* attempt_allocation_locked(size_t word_size);
-
-  // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs the
-  // allocation out of the new current alloc region and updates
-  // _cur_alloc_region. Normally, it would try to allocate a new
-  // region if the young gen is not full, unless can_expand is true in
-  // which case it would always try to allocate a new region.
-  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
-                                                  bool at_safepoint,
-                                                  bool do_dirtying,
-                                                  bool can_expand);
-
-  // Third-level of allocation slow path: when we are unable to
-  // allocate a new current alloc region to satisfy an allocation
-  // request (i.e., when attempt_allocation_locked() fails). It will
-  // try to do an evacuation pause, which might stall due to the GC
-  // locker, and retry the allocation attempt when appropriate.
-  HeapWord* attempt_allocation_slow(size_t word_size);
-
-  // The method that tries to satisfy a humongous allocation
-  // request. If it cannot satisfy it it will try to do an evacuation
-  // pause to perhaps reclaim enough space to be able to satisfy the
-  // allocation request afterwards.
+  // Takes the Heap_lock and attempts a humongous allocation. It can
+  // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         bool at_safepoint);
+                                         unsigned int* gc_count_before_ret);
 
-  // It does the common work when we are retiring the current alloc region.
-  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
-
-  // It retires the current alloc region, which is passed as a
-  // parameter (since, typically, the caller is already holding on to
-  // it). It sets _cur_alloc_region to NULL.
-  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
-
-  // It attempts to do an allocation immediately before or after an
-  // evacuation pause and can only be called by the VM thread. It has
-  // slightly different assumptions that the ones before (i.e.,
-  // assumes that the current alloc region has been retired).
+  // Allocation attempt that should be called during safepoints (e.g.,
+  // at the end of a successful GC). expect_null_mutator_alloc_region
+  // specifies whether the mutator alloc region is expected to be NULL
+  // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_cur_alloc_region);
+                                       bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that so that the post
   // write barrier never queues anything when updating objects on this
 
@@ -583,6 +503,12 @@ protected:
   // GC pause.
   void retire_alloc_region(HeapRegion* alloc_region, bool par);
 
+  // These two methods are the "callbacks" from the G1AllocRegion class.
+
+  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
+  void retire_mutator_alloc_region(HeapRegion* alloc_region,
+                                   size_t allocated_bytes);
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
 
@@ -1027,6 +953,9 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
+  void verify_dirty_young_regions() PRODUCT_RETURN;
+
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).
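The new comment block above explains why the slow paths read total_collections() under the Heap_lock and hand the value back through gc_count_before_ret. Roughly, the caller can later request a collection only if that count is still current, so two threads that both failed an allocation do not schedule two back-to-back pauses. A toy, self-contained sketch of that protocol follows; the "heap" is a stand-in, not G1, and all names are assumptions.

// Assumption: single-threaded toy model; the VM-operation and locking
// machinery of the real mem_allocate()/do_collection_pause() path is elided.
#include <cstddef>
#include <cstdio>

struct ToyHeap {
  unsigned int total_collections;
  size_t free_words;
  ToyHeap() : total_collections(0), free_words(0) { }

  // Fails when there is no space; reports the GC count it observed
  // (in the real code this read happens while the Heap_lock is held).
  void* attempt_allocation(size_t word_size, unsigned int* gc_count_before_ret) {
    if (free_words >= word_size) {
      free_words -= word_size;
      return &free_words;            // any non-NULL token will do for the sketch
    }
    *gc_count_before_ret = total_collections;
    return NULL;
  }

  // Performs a collection only if no other thread has completed one since
  // gc_count_before was read; otherwise it is a no-op and the caller retries.
  void* collect_and_retry(size_t word_size, unsigned int gc_count_before) {
    if (total_collections == gc_count_before) {
      total_collections += 1;
      free_words += 1024;            // pretend the pause reclaimed space
    }
    return attempt_allocation(word_size, &gc_count_before);
  }
};

int main() {
  ToyHeap heap;
  unsigned int gc_count_before = 0;
  void* p = heap.attempt_allocation(16, &gc_count_before);
  if (p == NULL) {
    p = heap.collect_and_retry(16, gc_count_before);
  }
  printf("allocation %s after %u collection(s)\n",
         p != NULL ? "succeeded" : "failed", heap.total_collections);
  return 0;
}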
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -59,131 +60,23 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   return r != NULL && r->in_collection_set();
 }
 
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
-inline HeapWord*
-G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size,
-                                                bool with_heap_lock) {
-  assert_not_at_safepoint();
-  assert(with_heap_lock == Heap_lock->owned_by_self(),
-         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
-  assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region->is_young(),
-         "we only support young current alloc regions");
-  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
-         "should not be used for humongous allocations");
-  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
-
-  assert(!cur_alloc_region->is_empty(),
-         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
-                 cur_alloc_region->bottom(), cur_alloc_region->end()));
-  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
-  if (result != NULL) {
-    assert(is_in(result), "result should be in the heap");
-
-    if (with_heap_lock) {
-      Heap_lock->unlock();
-    }
-    assert_heap_not_locked();
-    // Do the dirtying after we release the Heap_lock.
-    dirty_young_block(result, word_size);
-    return result;
-  }
-
-  if (with_heap_lock) {
-    assert_heap_locked();
-  } else {
-    assert_heap_not_locked();
-  }
-  return NULL;
-}
-
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
 inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size) {
+G1CollectedHeap::attempt_allocation(size_t word_size,
+                                    unsigned int* gc_count_before_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
-         "for humongous allocation requests");
+  assert(!isHumongous(word_size), "attempt_allocation() should not "
+         "be called for humongous allocation requests");
 
-  HeapRegion* cur_alloc_region = _cur_alloc_region;
-  if (cur_alloc_region != NULL) {
-    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size,
-                                                      false /* with_heap_lock */);
-    assert_heap_not_locked();
-    if (result != NULL) {
-      return result;
-    }
+  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
+                                                      false /* bot_updates */);
+  if (result == NULL) {
+    result = attempt_allocation_slow(word_size, gc_count_before_ret);
   }
-
-  // Our attempt to allocate lock-free failed as the current
-  // allocation region is either NULL or full. So, we'll now take the
-  // Heap_lock and retry.
-  Heap_lock->lock();
-
-  HeapWord* result = attempt_allocation_locked(word_size);
+  assert_heap_not_locked();
   if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
+    dirty_young_block(result, word_size);
   }
-
-  assert_heap_locked();
-  return NULL;
-}
-
-inline void
-G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
-         "pre-condition of the call");
-  assert(cur_alloc_region->is_young(),
-         "we only support young current alloc regions");
-
-  // The region is guaranteed to be young
-  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
-  _summary_bytes_used += cur_alloc_region->used();
-  _cur_alloc_region = NULL;
-}
-
-inline HeapWord*
-G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation_locked() "
-         "should not be called for humongous allocation requests");
-
-  // First, reread the current alloc region and retry the allocation
-  // in case somebody replaced it while we were waiting to get the
-  // Heap_lock.
-  HeapRegion* cur_alloc_region = _cur_alloc_region;
-  if (cur_alloc_region != NULL) {
-    HeapWord* result = allocate_from_cur_alloc_region(
-                                                  cur_alloc_region, word_size,
-                                                  true /* with_heap_lock */);
-    if (result != NULL) {
-      assert_heap_not_locked();
-      return result;
-    }
-
-    // We failed to allocate out of the current alloc region, so let's
-    // retire it before getting a new one.
-    retire_cur_alloc_region(cur_alloc_region);
-  }
-
-  assert_heap_locked();
-  // Try to get a new region and allocate out of it
-  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                  false, /* at_safepoint */
-                                                  true,  /* do_dirtying */
-                                                  false  /* can_expand */);
-  if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
-  }
-
-  assert_heap_locked();
-  return NULL;
+  return result;
 }
 
 // It dirties the cards that cover the block so that so that the post
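The new first-level attempt above is lock-free: it goes straight to the mutator alloc region without touching the Heap_lock, and presumably bottoms out in a CAS-based bump-pointer allocation along the lines of HeapRegion::par_allocate (the G1AllocRegion internals are not part of this hunk). A standalone sketch of that kind of par_allocate loop, as an illustration rather than the HotSpot code:

// Assumption: simplified model of a CAS bump-pointer region; the real
// par_allocate works on HeapWords and also handles BOT updates.
#include <atomic>
#include <cstddef>
#include <cstdio>

struct ParRegion {
  char* bottom;
  char* end;
  std::atomic<char*> top;

  ParRegion(char* b, size_t bytes) : bottom(b), end(b + bytes), top(b) { }

  // Multiple mutator threads can race here; compare_exchange retries until
  // either the claim succeeds or the region runs out of space.
  void* par_allocate(size_t bytes) {
    char* old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      if ((size_t)(end - old_top) < bytes) {
        return NULL;                 // full: the caller falls into the slow path
      }
      char* new_top = old_top + bytes;
      if (top.compare_exchange_weak(old_top, new_top)) {
        return old_top;              // this thread now owns [old_top, new_top)
      }
      // old_top was refreshed by compare_exchange_weak; try again.
    }
  }
};

int main() {
  static char storage[1024];
  ParRegion r(storage, sizeof(storage));
  void* a = r.par_allocate(100);
  void* b = r.par_allocate(2000);    // too large for the region: returns NULL
  printf("a=%p b=%p\n", a, b);
  return 0;
}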
hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -360,6 +360,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
   set_young_type(NotYoung);
+  reset_pre_dummy_top();
 
   if (!par) {
     // If this is parallel, this will be done later.
 
@@ -923,11 +924,11 @@ void G1OffsetTableContigSpace::set_saved_mark() {
     ContiguousSpace::set_saved_mark();
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
-    // The following fence is to force a flush of the writes above, but
-    // is strictly not needed because when an allocating worker thread
-    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
-    // when the lock is released, the write will be flushed.
-    // OrderAccess::fence();
+    // No need to do another barrier to flush the writes above. If
+    // this is called in parallel with other threads trying to
+    // allocate into the region, the caller should call this while
+    // holding a lock and when the lock is released the writes will be
+    // flushed.
   }
 }
hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -149,6 +149,13 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
   volatile unsigned _gc_time_stamp;
+  // When we need to retire an allocation region, while other threads
+  // are also concurrently trying to allocate into it, we typically
+  // allocate a dummy object at the end of the region to ensure that
+  // no more allocations can take place in it. However, sometimes we
+  // want to know where the end of the last "real" object we allocated
+  // into the region was, and this is what this field keeps track of.
+  HeapWord* _pre_dummy_top;
 
  public:
   // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
 
@@ -163,6 +170,17 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
   virtual void set_saved_mark();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
 
+  // See the comment above in the declaration of _pre_dummy_top for an
+  // explanation of what it is.
+  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
+    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
+    _pre_dummy_top = pre_dummy_top;
+  }
+  HeapWord* pre_dummy_top() {
+    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
+  }
+  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
+
   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   virtual void clear(bool mangle_space);
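_pre_dummy_top exists because retiring a region that other threads may still be allocating into is done by filling the remaining space with a dummy object, while remembering where the last real object ended so that card verification only looks at the genuinely allocated prefix. A small self-contained sketch of that retirement idea (simplified; not the HotSpot code):

// Assumption: toy region measured in "words", no real objects or card table.
#include <cstddef>
#include <cstdio>

struct ToyRegion {
  size_t top;            // words allocated so far
  size_t end;            // region capacity in words
  size_t pre_dummy_top;  // end of the last real object; 0 means "same as top"

  ToyRegion(size_t capacity) : top(0), end(capacity), pre_dummy_top(0) { }

  void retire() {
    // Remember where the real allocations stop...
    pre_dummy_top = top;
    // ...then claim one dummy block covering [top, end) so that any racing
    // allocator finds no space left in this region.
    top = end;
  }

  // Verification only needs [bottom, pre_dummy_top) to have been dirtied by
  // allocating threads; the dummy tail is never dirtied.
  size_t dirty_limit() const { return pre_dummy_top == 0 ? top : pre_dummy_top; }
};

int main() {
  ToyRegion r(512);
  r.top = 300;      // pretend 300 words of real objects were allocated
  r.retire();
  printf("top=%zu pre_dummy_top=%zu dirty up to %zu\n",
         r.top, r.pre_dummy_top, r.dirty_limit());
  return 0;
}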
hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp
@@ -38,15 +38,8 @@ inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // This ought to be just "allocate", because of the lock above, but that
-  // ContiguousSpace::allocate asserts that either the allocating thread
-  // holds the heap lock or it is the VM thread and we're at a safepoint.
-  // The best I (dld) could figure was to put a field in ContiguousSpace
-  // meaning "locking at safepoint taken care of", and set/reset that
-  // here. But this will do for now, especially in light of the comment
-  // above. Perhaps in the future some lock-free manner of keeping the
-  // coordination.
-  HeapWord* res = ContiguousSpace::par_allocate(size);
+  // Given that we take the lock no need to use par_allocate() here.
+  HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
hotspot/src/share/vm/memory/cardTableModRefBS.hpp
@@ -382,6 +382,11 @@ public:
     return (addr_for(pcard) == p);
   }
 
+  HeapWord* align_to_card_boundary(HeapWord* p) {
+    jbyte* pcard = byte_for(p + card_size_in_words - 1);
+    return addr_for(pcard);
+  }
+
   // The kinds of precision a CardTableModRefBS may offer.
   enum PrecisionStyle {
     Precise,
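align_to_card_boundary() rounds an address up to the next card boundary (and is the identity for an address that is already aligned), which is exactly what verify_dirty_young_list() needs since pre_dummy_top() is not guaranteed to be card aligned. The same arithmetic written out directly, with an assumed 512-byte card size instead of the card-table byte_for()/addr_for() indirection:

// Assumption: 512-byte cards and byte addresses, purely for illustration.
#include <cstdio>

static const unsigned long card_size = 512;

// Round p up to the next card boundary (identity if already aligned).
static unsigned long align_to_card_boundary(unsigned long p) {
  return ((p + card_size - 1) / card_size) * card_size;
}

int main() {
  printf("0x%lx -> 0x%lx\n", 0x1000UL, align_to_card_boundary(0x1000UL)); // aligned: 0x1000
  printf("0x%lx -> 0x%lx\n", 0x1010UL, align_to_card_boundary(0x1010UL)); // rounds up: 0x1200
  return 0;
}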
@@ -818,9 +818,14 @@ size_t ContiguousSpace::block_size(const HeapWord* p) const {
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
+  // In G1 there are places where a GC worker can allocate into a
+  // region using this serial allocation code without being prone to a
+  // race with other GC workers (we ensure that no other GC worker can
+  // access the same region at the same time). So the assert below is
+  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() &&
-          Thread::current()->is_VM_thread()),
+          (Thread::current()->is_VM_thread() || UseG1GC)),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {