8236926: Concurrently uncommit memory in G1

Reviewed-by: ayang, tschatzl
This commit is contained in:
Stefan Johansson 2020-11-19 17:55:33 +00:00
parent defdd12e70
commit b8244b606e
25 changed files with 1409 additions and 177 deletions

View file

@ -64,6 +64,7 @@
#include "gc/g1/g1Trace.hpp" #include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YCTypes.hpp" #include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1ServiceThread.hpp" #include "gc/g1/g1ServiceThread.hpp"
#include "gc/g1/g1UncommitRegionTask.hpp"
#include "gc/g1/g1VMOperations.hpp" #include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/heapRegion.inline.hpp" #include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionRemSet.hpp"
@ -743,7 +744,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
HeapWord* prev_last_addr = NULL; HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL; HeapRegion* prev_last_region = NULL;
size_t size_used = 0; size_t size_used = 0;
size_t uncommitted_regions = 0; uint shrink_count = 0;
// For each Memregion, free the G1 regions that constitute it, and // For each Memregion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.' // notify mark-sweep that the range is no longer to be considered 'archive.'
@ -792,14 +793,17 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
} else { } else {
curr_region = NULL; curr_region = NULL;
} }
_hrm.shrink_at(curr_index, 1); _hrm.shrink_at(curr_index, 1);
uncommitted_regions++; shrink_count++;
} }
} }
if (uncommitted_regions != 0) { if (shrink_count != 0) {
log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B", log_debug(gc, ergo, heap)("Attempt heap shrinking (archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * uncommitted_regions); HeapRegion::GrainWords * HeapWordSize * shrink_count);
// Explicit uncommit.
uncommit_regions(shrink_count);
} }
decrease_used(size_used); decrease_used(size_used);
} }
@ -1015,6 +1019,7 @@ void G1CollectedHeap::prepare_heap_for_mutators() {
rebuild_region_sets(false /* free_list_only */); rebuild_region_sets(false /* free_list_only */);
abort_refinement(); abort_refinement();
resize_heap_if_necessary(); resize_heap_if_necessary();
uncommit_regions_if_necessary();
// Rebuild the strong code root lists for each region // Rebuild the strong code root lists for each region
rebuild_strong_code_roots(); rebuild_strong_code_roots();
@ -1294,6 +1299,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B", log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
shrink_bytes, aligned_shrink_bytes, shrunk_bytes); shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
if (num_regions_removed > 0) { if (num_regions_removed > 0) {
log_debug(gc, heap)("Uncommittable regions after shrink: %u", num_regions_removed);
policy()->record_new_heap_size(num_regions()); policy()->record_new_heap_size(num_regions());
} else { } else {
log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)"); log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
@ -2611,6 +2617,20 @@ void G1CollectedHeap::gc_epilogue(bool full) {
_collection_pause_end = Ticks::now(); _collection_pause_end = Ticks::now();
} }
// Immediately uncommit up to 'region_limit' inactive regions via the heap
// region manager. Returns the number of regions actually uncommitted.
uint G1CollectedHeap::uncommit_regions(uint region_limit) {
  return _hrm.uncommit_inactive_regions(region_limit);
}
// Returns true if the heap region manager tracks any inactive regions,
// i.e. committed regions that are ready to be uncommitted.
bool G1CollectedHeap::has_uncommittable_regions() {
  return _hrm.has_inactive_regions();
}
// If any regions are ready to be uncommitted, schedule the concurrent
// uncommit task; otherwise do nothing.
void G1CollectedHeap::uncommit_regions_if_necessary() {
  if (!has_uncommittable_regions()) {
    // Nothing to uncommit.
    return;
  }
  G1UncommitRegionTask::enqueue();
}
void G1CollectedHeap::verify_numa_regions(const char* desc) { void G1CollectedHeap::verify_numa_regions(const char* desc) {
LogTarget(Trace, gc, heap, verify) lt; LogTarget(Trace, gc, heap, verify) lt;

View file

@ -563,6 +563,12 @@ public:
void resize_heap_if_necessary(); void resize_heap_if_necessary();
// Check if there is memory to uncommit and if so schedule a task to do it.
void uncommit_regions_if_necessary();
// Immediately uncommit uncommittable regions.
uint uncommit_regions(uint region_limit);
bool has_uncommittable_regions();
G1NUMA* numa() const { return _numa; } G1NUMA* numa() const { return _numa; }
// Expand the garbage-first heap by at least the given size (in bytes!). // Expand the garbage-first heap by at least the given size (in bytes!).

View file

@ -0,0 +1,249 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CommittedRegionMap.inline.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
// Construct a [start, end) range of regions; 'start' must not exceed 'end'.
HeapRegionRange::HeapRegionRange(uint start, uint end) :
  _start(start),
  _end(end) {
  assert(start <= end, "Invariant");
}
// Both bitmaps start empty; initialize() sizes them before use.
G1CommittedRegionMap::G1CommittedRegionMap() :
  _active(mtGC),
  _inactive(mtGC),
  _num_active(0),
  _num_inactive(0) { }

// Size both bitmaps to cover the maximum number of heap regions.
void G1CommittedRegionMap::initialize(uint num_regions) {
  _active.initialize(num_regions);
  _inactive.initialize(num_regions);
}
// Number of regions currently marked active (committed and usable).
uint G1CommittedRegionMap::num_active() const {
  return _num_active;
}

// Number of regions currently marked inactive (committed, awaiting uncommit).
uint G1CommittedRegionMap::num_inactive() const {
  return _num_inactive;
}

// Total number of regions tracked, i.e. the size of the bitmaps.
uint G1CommittedRegionMap::max_length() const {
  return (uint) _active.size();
}
// Transition Uncommitted -> Active for [start, end). The range must not be
// marked in either bitmap on entry.
void G1CommittedRegionMap::activate(uint start, uint end) {
  verify_active_count(start, end, 0);
  verify_inactive_count(start, end, 0);

  log_debug(gc, heap, region)("Activate regions [%u, %u)", start, end);

  active_set_range(start, end);
}

// Transition Inactive -> Active for [start, end), canceling a pending
// uncommit. The whole range must be inactive on entry.
void G1CommittedRegionMap::reactivate(uint start, uint end) {
  verify_active_count(start, end, 0);
  verify_inactive_count(start, end, (end - start));

  log_debug(gc, heap, region)("Reactivate regions [%u, %u)", start, end);

  active_set_range(start, end);
  inactive_clear_range(start, end);
}

// Transition Active -> Inactive for [start, end), making the regions
// candidates for uncommit. The whole range must be active on entry.
void G1CommittedRegionMap::deactivate(uint start, uint end) {
  verify_active_count(start, end, (end - start));
  verify_inactive_count(start, end, 0);

  log_debug(gc, heap, region)("Deactivate regions [%u, %u)", start, end);

  active_clear_range(start, end);
  inactive_set_range(start, end);
}

// Transition Inactive -> Uncommitted for [start, end). The whole range must
// be inactive on entry; after this the regions are tracked in neither map.
void G1CommittedRegionMap::uncommit(uint start, uint end) {
  verify_active_count(start, end, 0);
  verify_inactive_count(start, end, (end-start));

  log_debug(gc, heap, region)("Uncommit regions [%u, %u)", start, end);

  inactive_clear_range(start, end);
}
// Returns the next [start, end) range of active regions at or after
// 'offset'. When no active region remains, the empty range
// [max_length(), max_length()) is returned.
HeapRegionRange G1CommittedRegionMap::next_active_range(uint offset) const {
  // The first set bit at or after 'offset' starts the range.
  const uint first = (uint) _active.get_next_one_offset(offset);
  if (first == max_length()) {
    // No active regions found; report an empty range at the map end.
    return HeapRegionRange(max_length(), max_length());
  }

  // The range extends up to, but not including, the next clear bit.
  const uint limit = (uint) _active.get_next_zero_offset(first);
  verify_active_range(first, limit);
  return HeapRegionRange(first, limit);
}
// Returns the next [start, end) range of committable (free) regions at or
// after 'offset'. Must only be called while no inactive regions exist;
// returns the empty range [max_length(), max_length()) when nothing is free.
HeapRegionRange G1CommittedRegionMap::next_committable_range(uint offset) const {
  // Only valid when nothing is pending uncommit.
  verify_no_inactive_regons();

  // The first clear bit at or after 'offset' starts the free range.
  const uint first = (uint) _active.get_next_zero_offset(offset);
  if (first == max_length()) {
    // No free regions found; report an empty range at the map end.
    return HeapRegionRange(max_length(), max_length());
  }

  // The free range extends up to, but not including, the next set bit.
  const uint limit = (uint) _active.get_next_one_offset(first);
  verify_free_range(first, limit);
  return HeapRegionRange(first, limit);
}
// Returns the next [start, end) range of inactive regions at or after
// 'offset'. When no inactive region remains, the empty range
// [max_length(), max_length()) is returned.
HeapRegionRange G1CommittedRegionMap::next_inactive_range(uint offset) const {
  // The first set bit at or after 'offset' starts the range.
  const uint first = (uint) _inactive.get_next_one_offset(offset);
  if (first == max_length()) {
    // Early out when no inactive regions are found.
    return HeapRegionRange(max_length(), max_length());
  }

  // The range extends up to, but not including, the next clear bit.
  const uint limit = (uint) _inactive.get_next_zero_offset(first);
  verify_inactive_range(first, limit);
  return HeapRegionRange(first, limit);
}
// The helpers below mutate the bitmaps and keep the region counts in sync.
// Callers must satisfy the MT safety protocol, see guarantee_mt_safety_*.

void G1CommittedRegionMap::active_set_range(uint start, uint end) {
  guarantee_mt_safety_active();

  _active.par_set_range(start, end, BitMap::unknown_range);
  _num_active += (end - start);
}

void G1CommittedRegionMap::active_clear_range(uint start, uint end) {
  guarantee_mt_safety_active();

  _active.par_clear_range(start, end, BitMap::unknown_range);
  _num_active -= (end - start);
}

void G1CommittedRegionMap::inactive_set_range(uint start, uint end) {
  guarantee_mt_safety_inactive();

  _inactive.par_set_range(start, end, BitMap::unknown_range);
  _num_inactive += (end - start);
}

void G1CommittedRegionMap::inactive_clear_range(uint start, uint end) {
  guarantee_mt_safety_inactive();

  _inactive.par_clear_range(start, end, BitMap::unknown_range);
  _num_inactive -= (end - start);
}
void G1CommittedRegionMap::guarantee_mt_safety_active() const {
  // G1CommittedRegionMap _active-map MT safety protocol:
  // (a) If we're at a safepoint, the caller must either be the VM thread or
  //     hold the FreeList_lock.
  // (b) If we're not at a safepoint, the caller must hold the Heap_lock.

  // Protocol only applies after initialization is complete. During VM
  // startup the maps are set up single-threaded, so no lock is required.
  if (!Universe::is_fully_initialized()) {
    return;
  }

  if (SafepointSynchronize::is_at_safepoint()) {
    guarantee(Thread::current()->is_VM_thread() ||
              FreeList_lock->owned_by_self(),
              "G1CommittedRegionMap _active-map MT safety protocol at a safepoint");
  } else {
    guarantee(Heap_lock->owned_by_self(),
              "G1CommittedRegionMap _active-map MT safety protocol outside a safepoint");
  }
}
void G1CommittedRegionMap::guarantee_mt_safety_inactive() const {
  // G1CommittedRegionMap _inactive-map MT safety protocol:
  // (a) If we're at a safepoint, the caller must either be the VM thread or
  //     hold the FreeList_lock.
  // (b) If we're not at a safepoint, the caller must hold the Uncommit_lock.

  // Protocol only applies after initialization is complete. During VM
  // startup the maps are set up single-threaded, so no lock is required.
  if (!Universe::is_fully_initialized()) {
    return;
  }

  if (SafepointSynchronize::is_at_safepoint()) {
    // Name the map in the message, matching guarantee_mt_safety_active(),
    // so a failure immediately identifies which protocol was violated.
    guarantee(Thread::current()->is_VM_thread() ||
              FreeList_lock->owned_by_self(),
              "G1CommittedRegionMap _inactive-map MT safety protocol at a safepoint");
  } else {
    guarantee(Uncommit_lock->owned_by_self(),
              "G1CommittedRegionMap _inactive-map MT safety protocol outside a safepoint");
  }
}
#ifdef ASSERT
// Debug-only verification helpers (compiled under ASSERT only).

// Check that [start, end) is a maximal active range: both endpoints are
// active and the bit just past the range (if any) is clear.
void G1CommittedRegionMap::verify_active_range(uint start, uint end) const {
  assert(active(start), "First region (%u) is not active", start);
  assert(active(end - 1), "Last region (%u) is not active", end - 1);
  assert(end == _active.size() || !active(end), "Region (%u) is active but not included in range", end);
}

// Check that [start, end) is a maximal inactive range.
void G1CommittedRegionMap::verify_inactive_range(uint start, uint end) const {
  assert(inactive(start), "First region (%u) is not inactive", start);
  assert(inactive(end - 1), "Last region (%u) in range is not inactive", end - 1);
  assert(end == _inactive.size() || !inactive(end), "Region (%u) is inactive but not included in range", end);
}

// Check that the endpoints of [start, end) are not active, i.e. free.
void G1CommittedRegionMap::verify_free_range(uint start, uint end) const {
  assert(!active(start), "First region (%u) is active", start);
  assert(!active(end - 1), "Last region (%u) in range is active", end - 1);
}

// Check that no region is inactive (pending uncommit).
// NOTE(review): "regons" is a typo for "regions"; kept as-is because the
// header declares the method with the same spelling.
void G1CommittedRegionMap::verify_no_inactive_regons() const {
  BitMap::idx_t first_inactive = _inactive.get_next_one_offset(0);
  assert(first_inactive == _inactive.size(), "Should be no inactive regions, but was at index: " SIZE_FORMAT, first_inactive);
}

// Check that [start, end) contains exactly 'expected' active regions.
void G1CommittedRegionMap::verify_active_count(uint start, uint end, uint expected) const {
  uint found = (uint) _active.count_one_bits(start, end);
  assert(found == expected, "Unexpected number of active regions, found: %u, expected: %u", found, expected);
}

// Check that [start, end) contains exactly 'expected' inactive regions.
void G1CommittedRegionMap::verify_inactive_count(uint start, uint end, uint expected) const {
  uint found = (uint) _inactive.count_one_bits(start, end);
  assert(found == expected, "Unexpected number of inactive regions, found: %u, expected: %u", found, expected);
}
#endif //ASSERT

View file

@ -0,0 +1,124 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1COMMITTEDREGIONMAP_HPP
#define SHARE_GC_G1_G1COMMITTEDREGIONMAP_HPP
#include "memory/allocation.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/macros.hpp"
// Helper class to define a range [start, end) of regions.
// Helper class to define a range [start, end) of regions.
class HeapRegionRange : public StackObj {
  // Inclusive start of the range.
  uint _start;
  // Exclusive end of the range.
  uint _end;

public:
  // Asserts start <= end; an empty range has start == end.
  HeapRegionRange(uint start, uint end);

  uint start() const { return _start; }
  uint end() const { return _end; }
  // Number of regions in the range; zero for an empty range.
  uint length() const { return _end - _start; }
};
// The G1CommittedRegionMap keeps track of which regions are currently committed.
// It tracks both regions ready for use and if there are any regions ready for
// uncommit. We basically have three states. Uncommitted, Active, Inactive. All
// regions that are either Active or Inactive are committed.
//
// State transitions:
// Uncommitted -> Active (activate())
// Active -> Inactive (deactivate())
// Inactive -> Active (reactivate())
// Inactive -> Uncommitted (uncommit())
//
class G1CommittedRegionMap : public CHeapObj<mtGC> {
  // Each bit in this bitmap indicates that the corresponding region is active
  // and available for allocation.
  CHeapBitMap _active;
  // Each bit in this bitmap indicates that the corresponding region is no longer
  // active and it can be uncommitted.
  CHeapBitMap _inactive;

  // The union of these two bitmaps are the regions that are currently committed.

  // The number of regions active and available for use.
  uint _num_active;

  // The number of regions ready to be uncommitted.
  uint _num_inactive;

  // The size of the bitmaps, i.e. the maximum number of regions tracked.
  uint max_length() const;

  // Helpers to mark and do accounting for the bitmaps. Depending on when called
  // these helpers require to own different locks. See guarantee_mt_safety_* for
  // details.
  void active_set_range(uint start, uint end);
  void active_clear_range(uint start, uint end);
  void inactive_set_range(uint start, uint end);
  void inactive_clear_range(uint start, uint end);

public:
  G1CommittedRegionMap();
  // Size both bitmaps to track 'num_regions' regions.
  void initialize(uint num_regions);

  uint num_active() const;
  uint num_inactive() const;

  // Check if a region is marked active.
  inline bool active(uint index) const;

  // Check if a region is marked inactive.
  inline bool inactive(uint index) const;

  // Mark a range of regions as active.
  void activate(uint start, uint end);

  // Mark a range of regions as inactive and ready to be uncommitted.
  void deactivate(uint start, uint end);

  // Mark a range of regions active again and no longer ready for uncommit.
  void reactivate(uint start, uint end);

  // Uncommit a range of inactive regions.
  void uncommit(uint start, uint end);

  // Finds the next range of active regions starting at offset.
  HeapRegionRange next_active_range(uint offset) const;

  // Finds the next range of inactive regions starting at offset.
  HeapRegionRange next_inactive_range(uint offset) const;

  // Finds the next range of committable regions starting at offset.
  // This function must only be called when no inactive regions are
  // present and can be used to activate more regions.
  HeapRegionRange next_committable_range(uint offset) const;

protected:
  // MT safety checks for updates to the two maps; virtual, presumably so
  // subclasses (e.g. in tests) can relax them — TODO confirm.
  virtual void guarantee_mt_safety_active() const;
  virtual void guarantee_mt_safety_inactive() const;

  // Debug-only verification helpers; no-ops in product builds.
  void verify_active_range(uint start, uint end) const NOT_DEBUG_RETURN;
  void verify_free_range(uint start, uint end) const NOT_DEBUG_RETURN;
  void verify_inactive_range(uint start, uint end) const NOT_DEBUG_RETURN;
  void verify_no_inactive_regons() const NOT_DEBUG_RETURN;
  void verify_active_count(uint start, uint end, uint expected) const NOT_DEBUG_RETURN;
  void verify_inactive_count(uint start, uint end, uint expected) const NOT_DEBUG_RETURN;
};
#endif // SHARE_GC_G1_G1COMMITTEDREGIONMAP_HPP

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1COMMITTEDREGIONMAP_INLINE_HPP
#define SHARE_GC_G1_G1COMMITTEDREGIONMAP_INLINE_HPP
#include "gc/g1/g1CommittedRegionMap.hpp"
#include "utilities/bitMap.inline.hpp"
// Returns true iff the region at 'index' is marked active.
inline bool G1CommittedRegionMap::active(uint index) const {
  return _active.at(index);
}

// Returns true iff the region at 'index' is marked inactive (ready to be
// uncommitted).
inline bool G1CommittedRegionMap::inactive(uint index) const {
  return _inactive.at(index);
}
#endif // SHARE_GC_G1_G1COMMITTEDREGIONMAP_INLINE_HPP

View file

@ -1160,6 +1160,7 @@ void G1ConcurrentMark::remark() {
} }
_g1h->resize_heap_if_necessary(); _g1h->resize_heap_if_necessary();
_g1h->uncommit_regions_if_necessary();
compute_new_sizes(); compute_new_sizes();

View file

@ -98,6 +98,18 @@ public:
} }
} }
// Print an ACTIVE event for 'hr' when this printer is active.
void active(HeapRegion* hr) {
  if (is_active()) {
    print("ACTIVE", hr);
  }
}
// Print an INACTIVE event for 'hr' when this printer is active.
void inactive(HeapRegion* hr) {
  if (is_active()) {
    print("INACTIVE", hr);
  }
}
void uncommit(HeapRegion* hr) { void uncommit(HeapRegion* hr) {
if (is_active()) { if (is_active()) {
print("UNCOMMIT", hr); print("UNCOMMIT", hr);

View file

@ -187,7 +187,9 @@ void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_pa
bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) { bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
// We need to make sure to commit all pages covered by the given area. // We need to make sure to commit all pages covered by the given area.
guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted"); guarantee(is_area_uncommitted(start_page, size_in_pages),
"Specified area is not uncommitted, start page: " SIZE_FORMAT ", page count: " SIZE_FORMAT,
start_page, size_in_pages);
bool zero_filled = true; bool zero_filled = true;
size_t end_page = start_page + size_in_pages; size_t end_page = start_page + size_in_pages;
@ -196,12 +198,12 @@ bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
// Check for dirty pages and update zero_filled if any found. // Check for dirty pages and update zero_filled if any found.
if (_dirty.get_next_one_offset(start_page, end_page) < end_page) { if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
zero_filled = false; zero_filled = false;
_dirty.clear_range(start_page, end_page); _dirty.par_clear_range(start_page, end_page, BitMap::unknown_range);
} }
} else { } else {
commit_internal(start_page, end_page); commit_internal(start_page, end_page);
} }
_committed.set_range(start_page, end_page); _committed.par_set_range(start_page, end_page, BitMap::unknown_range);
return zero_filled; return zero_filled;
} }
@ -215,18 +217,20 @@ void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_pa
} }
void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) { void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
guarantee(is_area_committed(start_page, size_in_pages), "checking"); guarantee(is_area_committed(start_page, size_in_pages),
"Specified area is not committed, start page: " SIZE_FORMAT ", page count: " SIZE_FORMAT,
start_page, size_in_pages);
size_t end_page = start_page + size_in_pages; size_t end_page = start_page + size_in_pages;
if (_special) { if (_special) {
// Mark that memory is dirty. If committed again the memory might // Mark that memory is dirty. If committed again the memory might
// need to be cleared explicitly. // need to be cleared explicitly.
_dirty.set_range(start_page, end_page); _dirty.par_set_range(start_page, end_page, BitMap::unknown_range);
} else { } else {
uncommit_internal(start_page, end_page); uncommit_internal(start_page, end_page);
} }
_committed.clear_range(start_page, end_page); _committed.par_clear_range(start_page, end_page, BitMap::unknown_range);
} }
void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) { void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {

View file

@ -28,6 +28,7 @@
#include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp" #include "memory/virtualspace.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memTracker.hpp" #include "services/memTracker.hpp"
#include "utilities/align.hpp" #include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp" #include "utilities/bitMap.inline.hpp"
@ -50,6 +51,14 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
MemTracker::record_virtual_memory_type((address)rs.base(), type); MemTracker::record_virtual_memory_type((address)rs.base(), type);
} }
// Used to manually signal a mapper to handle a set of regions as committed.
// Passing 'false' for the zero_filled argument signals the mapper that the
// regions have not been cleared by the OS and that they need to be cleared
// explicitly.
void G1RegionToSpaceMapper::signal_mapping_changed(uint start_idx, size_t num_regions) {
  fire_on_commit(start_idx, num_regions, false);
}
// G1RegionToSpaceMapper implementation where the region granularity is larger than // G1RegionToSpaceMapper implementation where the region granularity is larger than
// or the same as the commit granularity. // or the same as the commit granularity.
// Basically, the space corresponding to one region region spans several OS pages. // Basically, the space corresponding to one region region spans several OS pages.
@ -70,7 +79,21 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity"); guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
} }
// Returns true iff every region in [start_idx, start_idx + num_regions)
// is marked committed, i.e. no clear bit exists in the range.
bool is_range_committed(uint start_idx, size_t num_regions) {
  BitMap::idx_t end = start_idx + num_regions;
  return _region_commit_map.get_next_zero_offset(start_idx, end) == end;
}

// Returns true iff no region in [start_idx, start_idx + num_regions)
// is marked committed, i.e. no set bit exists in the range.
bool is_range_uncommitted(uint start_idx, size_t num_regions) {
  BitMap::idx_t end = start_idx + num_regions;
  return _region_commit_map.get_next_one_offset(start_idx, end) == end;
}
virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) { virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
guarantee(is_range_uncommitted(start_idx, num_regions),
"Range not uncommitted, start: %u, num_regions: " SIZE_FORMAT,
start_idx, num_regions);
const size_t start_page = (size_t)start_idx * _pages_per_region; const size_t start_page = (size_t)start_idx * _pages_per_region;
const size_t size_in_pages = num_regions * _pages_per_region; const size_t size_in_pages = num_regions * _pages_per_region;
bool zero_filled = _storage.commit(start_page, size_in_pages); bool zero_filled = _storage.commit(start_page, size_in_pages);
@ -84,13 +107,17 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
if (AlwaysPreTouch) { if (AlwaysPreTouch) {
_storage.pretouch(start_page, size_in_pages, pretouch_gang); _storage.pretouch(start_page, size_in_pages, pretouch_gang);
} }
_region_commit_map.set_range(start_idx, start_idx + num_regions); _region_commit_map.par_set_range(start_idx, start_idx + num_regions, BitMap::unknown_range);
fire_on_commit(start_idx, num_regions, zero_filled); fire_on_commit(start_idx, num_regions, zero_filled);
} }
virtual void uncommit_regions(uint start_idx, size_t num_regions) { virtual void uncommit_regions(uint start_idx, size_t num_regions) {
guarantee(is_range_committed(start_idx, num_regions),
"Range not committed, start: %u, num_regions: " SIZE_FORMAT,
start_idx, num_regions);
_storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region); _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_region_commit_map.clear_range(start_idx, start_idx + num_regions); _region_commit_map.par_clear_range(start_idx, start_idx + num_regions, BitMap::unknown_range);
} }
}; };
@ -99,6 +126,17 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
// Basically, the contents of one OS page span several regions. // Basically, the contents of one OS page span several regions.
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper { class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t _regions_per_page; size_t _regions_per_page;
// Lock to prevent bitmap updates and the actual underlying
// commit to get out of order. This can happen in the cases
// where one thread is expanding the heap during a humongous
// allocation and at the same time the service thread is
// doing uncommit. These operations will not operate on the
// same regions, but they might operate on regions sharing
// an underlying OS page. So we need to make sure that both
// those resources are in sync:
// - G1RegionToSpaceMapper::_region_commit_map;
// - G1PageBasedVirtualSpace::_committed (_storage.commit())
Mutex _lock;
size_t region_idx_to_page_idx(uint region_idx) const { size_t region_idx_to_page_idx(uint region_idx) const {
return region_idx / _regions_per_page; return region_idx / _regions_per_page;
@ -128,7 +166,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t commit_factor, size_t commit_factor,
MEMFLAGS type) : MEMFLAGS type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type), G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
_regions_per_page((page_size * commit_factor) / alloc_granularity) { _regions_per_page((page_size * commit_factor) / alloc_granularity),
_lock(Mutex::leaf, "G1 mapper lock", true, Mutex::_safepoint_check_never) {
guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity"); guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
} }
@ -148,29 +187,36 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t end_page = region_idx_to_page_idx(region_limit - 1); size_t end_page = region_idx_to_page_idx(region_limit - 1);
bool all_zero_filled = true; bool all_zero_filled = true;
for (size_t page = start_page; page <= end_page; page++) {
if (!is_page_committed(page)) {
// Page not committed.
if (num_committed == 0) {
first_committed = page;
}
num_committed++;
if (!_storage.commit(page, 1)) { // Concurrent operations might operate on regions sharing the same
// Found dirty region during commit. // underlying OS page. See lock declaration for more details.
{
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
for (size_t page = start_page; page <= end_page; page++) {
if (!is_page_committed(page)) {
// Page not committed.
if (num_committed == 0) {
first_committed = page;
}
num_committed++;
if (!_storage.commit(page, 1)) {
// Found dirty region during commit.
all_zero_filled = false;
}
// Move memory to correct NUMA node for the heap.
numa_request_on_node(page);
} else {
// Page already committed.
all_zero_filled = false; all_zero_filled = false;
} }
// Move memory to correct NUMA node for the heap.
numa_request_on_node(page);
} else {
// Page already committed.
all_zero_filled = false;
} }
}
// Update the commit map for the given range. // Update the commit map for the given range. Not using the par_set_range
_region_commit_map.set_range(start_idx, region_limit); // since updates to _region_commit_map for this mapper is protected by _lock.
_region_commit_map.set_range(start_idx, region_limit, BitMap::unknown_range);
}
if (AlwaysPreTouch && num_committed > 0) { if (AlwaysPreTouch && num_committed > 0) {
_storage.pretouch(first_committed, num_committed, pretouch_gang); _storage.pretouch(first_committed, num_committed, pretouch_gang);
@ -188,8 +234,12 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t start_page = region_idx_to_page_idx(start_idx); size_t start_page = region_idx_to_page_idx(start_idx);
size_t end_page = region_idx_to_page_idx(region_limit - 1); size_t end_page = region_idx_to_page_idx(region_limit - 1);
// Clear commit map for the given range. // Concurrent operations might operate on regions sharing the same
_region_commit_map.clear_range(start_idx, region_limit); // underlying OS page. See lock declaration for more details.
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
// Clear commit map for the given range. Not using the par_clear_range since
// updates to _region_commit_map for this mapper is protected by _lock.
_region_commit_map.clear_range(start_idx, region_limit, BitMap::unknown_range);
for (size_t page = start_page; page <= end_page; page++) { for (size_t page = start_page; page <= end_page; page++) {
// We know all pages were committed before clearing the map. If the // We know all pages were committed before clearing the map. If the

View file

@ -66,6 +66,8 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; } void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
void signal_mapping_changed(uint start_idx, size_t num_regions);
virtual ~G1RegionToSpaceMapper() {} virtual ~G1RegionToSpaceMapper() {}
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0; virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;

View file

@ -205,16 +205,12 @@ void G1ServiceThread::register_task(G1ServiceTask* task, jlong delay) {
// Associate the task with the service thread. // Associate the task with the service thread.
task->set_service_thread(this); task->set_service_thread(this);
// Schedule the task to run after the given delay. // Schedule the task to run after the given delay. The service will be
// notified to check if this task is first in the queue.
schedule_task(task, delay); schedule_task(task, delay);
// Notify the service thread that there is a new task, thread might
// be waiting and the newly added task might be first in the list.
MonitorLocker ml(&_monitor, Mutex::_no_safepoint_check_flag);
ml.notify();
} }
void G1ServiceThread::schedule_task(G1ServiceTask* task, jlong delay_ms) { void G1ServiceThread::schedule(G1ServiceTask* task, jlong delay_ms) {
guarantee(task->is_registered(), "Must be registered before scheduled"); guarantee(task->is_registered(), "Must be registered before scheduled");
guarantee(task->next() == NULL, "Task already in queue"); guarantee(task->next() == NULL, "Task already in queue");
@ -229,6 +225,11 @@ void G1ServiceThread::schedule_task(G1ServiceTask* task, jlong delay_ms) {
task->name(), TimeHelper::counter_to_seconds(task->time())); task->name(), TimeHelper::counter_to_seconds(task->time()));
} }
void G1ServiceThread::schedule_task(G1ServiceTask* task, jlong delay_ms) {
schedule(task, delay_ms);
notify();
}
int64_t G1ServiceThread::time_to_next_task_ms() { int64_t G1ServiceThread::time_to_next_task_ms() {
assert(_monitor.owned_by_self(), "Must be owner of lock"); assert(_monitor.owned_by_self(), "Must be owner of lock");
assert(!_task_queue.is_empty(), "Should not be called for empty list"); assert(!_task_queue.is_empty(), "Should not be called for empty list");
@ -243,6 +244,11 @@ int64_t G1ServiceThread::time_to_next_task_ms() {
return (int64_t) TimeHelper::counter_to_millis(time_diff); return (int64_t) TimeHelper::counter_to_millis(time_diff);
} }
void G1ServiceThread::notify() {
MonitorLocker ml(&_monitor, Mutex::_no_safepoint_check_flag);
ml.notify();
}
void G1ServiceThread::sleep_before_next_cycle() { void G1ServiceThread::sleep_before_next_cycle() {
if (should_terminate()) { if (should_terminate()) {
return; return;
@ -309,8 +315,7 @@ void G1ServiceThread::run_service() {
} }
void G1ServiceThread::stop_service() { void G1ServiceThread::stop_service() {
MonitorLocker ml(&_monitor, Mutex::_no_safepoint_check_flag); notify();
ml.notify();
} }
G1ServiceTask::G1ServiceTask(const char* name) : G1ServiceTask::G1ServiceTask(const char* name) :
@ -328,7 +333,9 @@ bool G1ServiceTask::is_registered() {
} }
void G1ServiceTask::schedule(jlong delay_ms) { void G1ServiceTask::schedule(jlong delay_ms) {
_service_thread->schedule_task(this, delay_ms); assert(Thread::current() == _service_thread,
"Can only be used when already running on the service thread");
_service_thread->schedule(this, delay_ms);
} }
const char* G1ServiceTask::name() { const char* G1ServiceTask::name() {

View file

@ -61,8 +61,9 @@ public:
virtual void execute() = 0; virtual void execute() = 0;
protected: protected:
// Schedule the task on the associated service thread // Schedule the task on the associated service thread using
// using the provided delay in milliseconds. // the provided delay in milliseconds. Can only be used when
// currently running on the service thread.
void schedule(jlong delay_ms); void schedule(jlong delay_ms);
// These setters are protected for use by testing and the // These setters are protected for use by testing and the
@ -122,8 +123,13 @@ class G1ServiceThread: public ConcurrentGCThread {
G1ServiceTask* pop_due_task(); G1ServiceTask* pop_due_task();
void run_task(G1ServiceTask* task); void run_task(G1ServiceTask* task);
// Schedule a registered task to run after the given delay. // Helper used by both schedule_task() and G1ServiceTask::schedule()
void schedule_task(G1ServiceTask* task, jlong delay); // to schedule a registered task to run after the given delay.
void schedule(G1ServiceTask* task, jlong delay);
// Notify a change to the service thread. Used to either stop
// the service or to force check for new tasks.
void notify();
public: public:
G1ServiceThread(); G1ServiceThread();
@ -133,6 +139,10 @@ public:
// Register a task with the service thread and schedule it. If // Register a task with the service thread and schedule it. If
// no delay is specified the task is scheduled to run directly. // no delay is specified the task is scheduled to run directly.
void register_task(G1ServiceTask* task, jlong delay = 0); void register_task(G1ServiceTask* task, jlong delay = 0);
// Schedule the task and notify the service thread that a new
// task might be ready to run.
void schedule_task(G1ServiceTask* task, jlong delay);
}; };
#endif // SHARE_GC_G1_G1SERVICETHREAD_HPP #endif // SHARE_GC_G1_G1SERVICETHREAD_HPP

View file

@ -0,0 +1,136 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1UncommitRegionTask.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "runtime/globals.hpp"
#include "utilities/ticks.hpp"
// Singleton instance, lazily created the first time instance() is called.
G1UncommitRegionTask* G1UncommitRegionTask::_instance = NULL;
// Start out inactive; the task is flipped to active when it is registered
// with, or enqueued on, the G1 service thread.
G1UncommitRegionTask::G1UncommitRegionTask() :
G1ServiceTask("G1 Uncommit Region Task"),
_active(false),
_summary_duration(),
_summary_region_count(0) { }
void G1UncommitRegionTask::initialize() {
assert(_instance == NULL, "Already initialized");
_instance = new G1UncommitRegionTask();
// Register the task with the service thread. This will automatically
// schedule the task so we change the state to active.
_instance->set_active(true);
G1CollectedHeap::heap()->service_thread()->register_task(_instance);
}
// Return the singleton, creating it on first use.
G1UncommitRegionTask* G1UncommitRegionTask::instance() {
  if (_instance != NULL) {
    return _instance;
  }
  initialize();
  return _instance;
}
void G1UncommitRegionTask::enqueue() {
assert_at_safepoint_on_vm_thread();
G1UncommitRegionTask* uncommit_task = instance();
if (!uncommit_task->is_active()) {
// Change state to active and schedule with no delay.
uncommit_task->set_active(true);
G1CollectedHeap::heap()->service_thread()->schedule_task(uncommit_task, 0);
}
}
// Current activation state; see set_active() for why no lock is needed.
bool G1UncommitRegionTask::is_active() {
  return _active;
}
// Flip the activation state. No lock guards _active because the two
// writers can never run in parallel: the transition to true happens
// only inside a safepoint, and the transition to false happens on the
// service thread while joined with the suspendible thread set.
void G1UncommitRegionTask::set_active(bool state) {
  assert(_active != state, "Must do a state change");
  _active = state;
}
// Fold one uncommit chunk into the running summary and trace-log it.
void G1UncommitRegionTask::report_execution(Tickspan time, uint regions) {
  _summary_region_count += regions;
  _summary_duration += time;

  size_t uncommitted_bytes = regions * HeapRegion::GrainBytes;
  log_trace(gc, heap)("Concurrent Uncommit: " SIZE_FORMAT "%s, %u regions, %1.3fms",
                      byte_size_in_proper_unit(uncommitted_bytes),
                      proper_unit_for_byte_size(uncommitted_bytes),
                      regions,
                      time.seconds() * 1000);
}
void G1UncommitRegionTask::report_summary() {
log_debug(gc, heap)("Concurrent Uncommit Summary: " SIZE_FORMAT "%s, %u regions, %1.3fms",
byte_size_in_proper_unit(_summary_region_count * HeapRegion::GrainBytes),
proper_unit_for_byte_size(_summary_region_count * HeapRegion::GrainBytes),
_summary_region_count,
_summary_duration.seconds() * 1000);
}
// Reset the accumulated statistics for the next uncommit cycle.
void G1UncommitRegionTask::clear_summary() {
  _summary_region_count = 0;
  _summary_duration = Tickspan();
}
void G1UncommitRegionTask::execute() {
assert(_active, "Must be active");
// Translate the size limit into a number of regions. This cannot be a
// compile time constant because G1HeapRegionSize is set ergonomically.
static const uint region_limit = (uint) (UncommitSizeLimit / G1HeapRegionSize);
// Prevent from running during a GC pause.
SuspendibleThreadSetJoiner sts;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Ticks start = Ticks::now();
uint uncommit_count = g1h->uncommit_regions(region_limit);
Tickspan uncommit_time = (Ticks::now() - start);
if (uncommit_count > 0) {
report_execution(uncommit_time, uncommit_count);
}
// Reschedule if there are more regions to uncommit, otherwise
// change state to inactive.
if (g1h->has_uncommittable_regions()) {
// No delay, reason to reschedule rather then to loop is to allow
// other tasks to run without waiting for a full uncommit cycle.
schedule(0);
} else {
// Nothing more to do, change state and report a summary.
set_active(false);
report_summary();
clear_summary();
}
}

View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1UNCOMMITREGIONTASK_HPP
#define SHARE_GC_G1_G1UNCOMMITREGIONTASK_HPP
#include "gc/g1/g1ServiceThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"
// Service task that concurrently uncommits heap regions previously made
// ready for uncommit, running in bounded chunks on the G1 service thread.
class G1UncommitRegionTask : public G1ServiceTask {
// Each execution of the uncommit task is limited to uncommit at most 256M.
// This limit is small enough to ensure that the duration of each invocation
// is short, while still making reasonable progress.
static const uint UncommitSizeLimit = 256 * M;
// Singleton instance, lazily created by instance()/initialize().
static G1UncommitRegionTask* _instance;
static void initialize();
static G1UncommitRegionTask* instance();
// The _active state is used to prevent the task from being enqueued on the
// service thread multiple times. If the task is active, a new request to
// enqueue it will be ignored.
bool _active;
// Members to keep a summary of the current concurrent uncommit
// work. Used for printing when no more work is available.
Tickspan _summary_duration;
uint _summary_region_count;
G1UncommitRegionTask();
bool is_active();
void set_active(bool state);
// Accumulate one execution's result into the summary and trace-log it.
void report_execution(Tickspan time, uint regions);
// Log the totals accumulated since the last clear_summary().
void report_summary();
void clear_summary();
public:
// Activate and schedule the task unless it is already active.
// Must be called at a safepoint on the VM thread.
static void enqueue();
virtual void execute();
};
#endif

View file

@ -26,6 +26,7 @@
#include "gc/g1/g1Arguments.hpp" #include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1CommittedRegionMap.inline.hpp"
#include "gc/g1/g1NUMAStats.hpp" #include "gc/g1/g1NUMAStats.hpp"
#include "gc/g1/heapRegion.hpp" #include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp" #include "gc/g1/heapRegionManager.inline.hpp"
@ -59,16 +60,11 @@ public:
const char* get_description() { return "Free Regions"; } const char* get_description() { return "Free Regions"; }
}; };
HeapRegionRange::HeapRegionRange(uint start, uint end) : _start(start), _end(end) {
assert(start <= end, "Invariant");
}
HeapRegionManager::HeapRegionManager() : HeapRegionManager::HeapRegionManager() :
_bot_mapper(NULL), _bot_mapper(NULL),
_cardtable_mapper(NULL), _cardtable_mapper(NULL),
_card_counts_mapper(NULL), _card_counts_mapper(NULL),
_available_map(mtGC), _committed_map(),
_num_committed(0),
_allocated_heapregions_length(0), _allocated_heapregions_length(0),
_regions(), _heap_mapper(NULL), _regions(), _heap_mapper(NULL),
_prev_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL),
@ -96,11 +92,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
_regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes); _regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes);
_available_map.initialize(_regions.length()); _committed_map.initialize(reserved_length());
}
bool HeapRegionManager::is_available(uint region) const {
return _available_map.at(region);
} }
HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) { HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
@ -174,13 +166,26 @@ HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
return g1h->new_heap_region(hrm_index, mr); return g1h->new_heap_region(hrm_index, mr);
} }
void HeapRegionManager::expand(uint start, uint num_regions, WorkGang* pretouch_gang) {
commit_regions(start, num_regions, pretouch_gang);
for (uint i = start; i < start + num_regions; i++) {
HeapRegion* hr = _regions.get_by_index(i);
if (hr == NULL) {
hr = new_heap_region(i);
OrderAccess::storestore();
_regions.set_by_index(i, hr);
_allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
}
G1CollectedHeap::heap()->hr_printer()->commit(hr);
}
activate_regions(start, num_regions);
}
void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) { void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
guarantee(num_regions > 0, "Must commit more than zero regions"); guarantee(num_regions > 0, "Must commit more than zero regions");
guarantee(num_regions <= available(), guarantee(num_regions <= available(),
"Cannot commit more than the maximum amount of regions"); "Cannot commit more than the maximum amount of regions");
_num_committed += (uint)num_regions;
_heap_mapper->commit_regions(index, num_regions, pretouch_gang); _heap_mapper->commit_regions(index, num_regions, pretouch_gang);
// Also commit auxiliary data // Also commit auxiliary data
@ -193,26 +198,21 @@ void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang*
_card_counts_mapper->commit_regions(index, num_regions, pretouch_gang); _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
} }
void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) { void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start); guarantee(num_regions > 0, "No point in calling this for zero regions");
guarantee(_num_committed >= num_regions, "pre-condition");
// Reset node index to distinguish with committed regions. uint end = start + num_regions;
for (uint i = start; i < start + num_regions; i++) { G1HRPrinter* printer = G1CollectedHeap::heap()->hr_printer();
at(i)->set_node_index(G1NUMA::UnknownNodeIndex); if (printer->is_active()) {
} for (uint i = start; i < end; i++) {
// Can't use at() here since region is no longer marked available.
// Print before uncommitting. HeapRegion* hr = _regions.get_by_index(i);
if (G1CollectedHeap::heap()->hr_printer()->is_active()) { assert(hr != NULL, "Region should still be present");
for (uint i = start; i < start + num_regions; i++) { printer->uncommit(hr);
HeapRegion* hr = at(i);
G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
} }
} }
_num_committed -= (uint)num_regions; // Uncommit heap memory
_available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
_heap_mapper->uncommit_regions(start, num_regions); _heap_mapper->uncommit_regions(start, num_regions);
// Also uncommit auxiliary data // Also uncommit auxiliary data
@ -223,35 +223,63 @@ void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
_cardtable_mapper->uncommit_regions(start, num_regions); _cardtable_mapper->uncommit_regions(start, num_regions);
_card_counts_mapper->uncommit_regions(start, num_regions); _card_counts_mapper->uncommit_regions(start, num_regions);
_committed_map.uncommit(start, end);
} }
void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) { void HeapRegionManager::initialize_regions(uint start, uint num_regions) {
guarantee(num_regions > 0, "No point in calling this for zero regions");
commit_regions(start, num_regions, pretouch_gang);
for (uint i = start; i < start + num_regions; i++) {
if (_regions.get_by_index(i) == NULL) {
HeapRegion* new_hr = new_heap_region(i);
OrderAccess::storestore();
_regions.set_by_index(i, new_hr);
_allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
}
}
_available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
for (uint i = start; i < start + num_regions; i++) { for (uint i = start; i < start + num_regions; i++) {
assert(is_available(i), "Just made region %u available but is apparently not.", i); assert(is_available(i), "Just made region %u available but is apparently not.", i);
HeapRegion* hr = at(i); HeapRegion* hr = at(i);
if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
G1CollectedHeap::heap()->hr_printer()->commit(hr);
}
hr->initialize(); hr->initialize();
hr->set_node_index(G1NUMA::numa()->index_for_region(hr)); hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
insert_into_free_list(at(i)); insert_into_free_list(hr);
G1CollectedHeap::heap()->hr_printer()->active(hr);
} }
} }
void HeapRegionManager::activate_regions(uint start, uint num_regions) {
_committed_map.activate(start, start + num_regions);
initialize_regions(start, num_regions);
}
void HeapRegionManager::reactivate_regions(uint start, uint num_regions) {
assert(num_regions > 0, "No point in calling this for zero regions");
clear_auxiliary_data_structures(start, num_regions);
_committed_map.reactivate(start, start + num_regions);
initialize_regions(start, num_regions);
}
void HeapRegionManager::deactivate_regions(uint start, uint num_regions) {
assert(num_regions > 0, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
assert(length() >= num_regions, "pre-condition");
// Reset NUMA index to and print state change.
uint end = start + num_regions;
for (uint i = start; i < end; i++) {
HeapRegion* hr = at(i);
hr->set_node_index(G1NUMA::UnknownNodeIndex);
G1CollectedHeap::heap()->hr_printer()->inactive(hr);
}
_committed_map.deactivate(start, end);
}
void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_regions) {
// Signal marking bitmaps to clear the given regions.
_prev_bitmap_mapper->signal_mapping_changed(start, num_regions);
_next_bitmap_mapper->signal_mapping_changed(start, num_regions);
// Signal G1BlockOffsetTable to clear the given regions.
_bot_mapper->signal_mapping_changed(start, num_regions);
// Signal G1CardTable to clear the given regions.
_cardtable_mapper->signal_mapping_changed(start, num_regions);
// Signal G1CardCounts to clear the given regions.
_card_counts_mapper->signal_mapping_changed(start, num_regions);
}
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const { MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
size_t used_sz = size_t used_sz =
_prev_bitmap_mapper->committed_size() + _prev_bitmap_mapper->committed_size() +
@ -270,31 +298,88 @@ MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
return MemoryUsage(0, used_sz, committed_sz, committed_sz); return MemoryUsage(0, used_sz, committed_sz, committed_sz);
} }
uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) { bool HeapRegionManager::has_inactive_regions() const {
return expand_at(0, num_regions, pretouch_workers); return _committed_map.num_inactive() > 0;
} }
uint HeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) { uint HeapRegionManager::uncommit_inactive_regions(uint limit) {
if (num_regions == 0) { assert(limit > 0, "Need to specify at least one region to uncommit");
return 0;
}
uint offset = start; uint uncommitted = 0;
uint offset = 0;
do {
MutexLocker uc(Uncommit_lock, Mutex::_no_safepoint_check_flag);
HeapRegionRange range = _committed_map.next_inactive_range(offset);
// No more regions available for uncommit. Return the number of regions
// already uncommitted or 0 if there were no longer any inactive regions.
if (range.length() == 0) {
return uncommitted;
}
uint start = range.start();
uint num_regions = MIN2(range.length(), limit - uncommitted);
uncommitted += num_regions;
uncommit_regions(start, num_regions);
} while (uncommitted < limit);
assert(uncommitted == limit, "Invariant");
return uncommitted;
}
uint HeapRegionManager::expand_inactive(uint num_regions) {
uint offset = 0;
uint expanded = 0; uint expanded = 0;
do { do {
HeapRegionRange regions = find_unavailable_from_idx(offset); HeapRegionRange regions = _committed_map.next_inactive_range(offset);
if (regions.length() == 0) { if (regions.length() == 0) {
// No more unavailable regions. // No more unavailable regions.
break; break;
} }
uint to_expand = MIN2(num_regions - expanded, regions.length()); uint to_expand = MIN2(num_regions - expanded, regions.length());
make_regions_available(regions.start(), to_expand, pretouch_workers); reactivate_regions(regions.start(), to_expand);
expanded += to_expand; expanded += to_expand;
offset = regions.end(); offset = regions.end();
} while (expanded < num_regions); } while (expanded < num_regions);
return expanded;
}
uint HeapRegionManager::expand_any(uint num_regions, WorkGang* pretouch_workers) {
assert(num_regions > 0, "Must expand at least 1 region");
uint offset = 0;
uint expanded = 0;
do {
HeapRegionRange regions = _committed_map.next_committable_range(offset);
if (regions.length() == 0) {
// No more unavailable regions.
break;
}
uint to_expand = MIN2(num_regions - expanded, regions.length());
expand(regions.start(), to_expand, pretouch_workers);
expanded += to_expand;
offset = regions.end();
} while (expanded < num_regions);
return expanded;
}
uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
assert(num_regions > 0, "Must expand at least 1 region");
// First "undo" any requests to uncommit memory concurrently by
// reverting such regions to being available.
uint expanded = expand_inactive(num_regions);
// Commit more regions if needed.
if (expanded < num_regions) {
expanded += expand_any(num_regions - expanded, pretouch_workers);
}
verify_optional(); verify_optional();
return expanded; return expanded;
} }
@ -304,9 +389,24 @@ void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkGang* pre
uint end = start + num_regions; uint end = start + num_regions;
for (uint i = start; i < end; i++) { for (uint i = start; i < end; i++) {
if (!is_available(i)) { // First check inactive. If the region is inactive, try to reactivate it
make_regions_available(i, 1, pretouch_workers); // before it gets uncommitted by the G1ServiceThread.
if (_committed_map.inactive(i)) {
// Need to grab the lock since this can be called by a java thread
// doing humongous allocations.
MutexLocker uc(Uncommit_lock, Mutex::_no_safepoint_check_flag);
// State might change while getting the lock.
if (_committed_map.inactive(i)) {
reactivate_regions(i, 1);
}
} }
// Not else-if to catch the case where the inactive region was uncommitted
// while waiting to get the lock.
if (!_committed_map.active(i)) {
expand(i, 1, pretouch_workers);
}
assert(at(i)->is_free(), "Region must be free at this point");
} }
verify_optional(); verify_optional();
@ -385,15 +485,13 @@ uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_
} }
uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) { uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
BitMap::idx_t range_start = 0;
BitMap::idx_t range_end = range_start;
uint candidate = G1_NO_HRM_INDEX; uint candidate = G1_NO_HRM_INDEX;
HeapRegionRange range(0,0);
do { do {
range_start = _available_map.get_next_one_offset(range_end); range = _committed_map.next_active_range(range.end());
range_end = _available_map.get_next_zero_offset(range_start); candidate = find_contiguous_in_range(range.start(), range.end(), num_regions);
candidate = find_contiguous_in_range((uint) range_start, (uint) range_end, num_regions); } while (candidate == G1_NO_HRM_INDEX && range.end() < reserved_length());
} while (candidate == G1_NO_HRM_INDEX && range_end < reserved_length());
return candidate; return candidate;
} }
@ -435,28 +533,6 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
} }
} }
HeapRegionRange HeapRegionManager::find_unavailable_from_idx(uint index) const {
guarantee(index <= reserved_length(), "checking");
// Find first unavailable region from offset.
BitMap::idx_t start = _available_map.get_next_zero_offset(index);
if (start == _available_map.size()) {
// No unavailable regions found.
return HeapRegionRange(reserved_length(), reserved_length());
}
// The end of the range is the next available region.
BitMap::idx_t end = _available_map.get_next_one_offset(start);
assert(!_available_map.at(start), "Found region (" SIZE_FORMAT ") is not unavailable", start);
assert(!_available_map.at(end - 1), "Last region (" SIZE_FORMAT ") in range is not unavailable", end - 1);
assert(end == _available_map.size() || _available_map.at(end), "Region (" SIZE_FORMAT ") is not available", end);
// Shrink returned range to number of regions left to commit if necessary.
end = MIN2(start + available(), end);
return HeapRegionRange((uint) start, (uint) end);
}
uint HeapRegionManager::find_highest_free(bool* expanded) { uint HeapRegionManager::find_highest_free(bool* expanded) {
// Loop downwards from the highest region index, looking for an // Loop downwards from the highest region index, looking for an
// entry which is either free or not yet committed. If not yet // entry which is either free or not yet committed. If not yet
@ -578,7 +654,8 @@ void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
assert(at(i)->is_free(), "Expected free region at index %u", i); assert(at(i)->is_free(), "Expected free region at index %u", i);
} }
#endif #endif
uncommit_regions(index, num_regions); // Mark regions as inactive making them ready for uncommit.
deactivate_regions(index, (uint) num_regions);
} }
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
@ -617,9 +694,9 @@ void HeapRegionManager::verify() {
guarantee(_allocated_heapregions_length <= reserved_length(), guarantee(_allocated_heapregions_length <= reserved_length(),
"invariant: _allocated_length: %u _max_length: %u", "invariant: _allocated_length: %u _max_length: %u",
_allocated_heapregions_length, reserved_length()); _allocated_heapregions_length, reserved_length());
guarantee(_num_committed <= max_length(), guarantee(length() <= max_length(),
"invariant: _num_committed: %u max_regions: %u", "invariant: committed regions: %u max_regions: %u",
_num_committed, max_length()); length(), max_length());
bool prev_committed = true; bool prev_committed = true;
uint num_committed = 0; uint num_committed = 0;
@ -650,7 +727,7 @@ void HeapRegionManager::verify() {
guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i); guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
} }
guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed); guarantee(num_committed == length(), "Found %u committed regions, but should be %u", num_committed, length());
_free_list.verify(); _free_list.verify();
} }

View file

@ -26,6 +26,7 @@
#define SHARE_GC_G1_HEAPREGIONMANAGER_HPP #define SHARE_GC_G1_HEAPREGIONMANAGER_HPP
#include "gc/g1/g1BiasedArray.hpp" #include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CommittedRegionMap.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp" #include "gc/g1/heapRegionSet.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
@ -42,20 +43,6 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
virtual HeapRegion* default_value() const { return NULL; } virtual HeapRegion* default_value() const { return NULL; }
}; };
// Helper class to define a range [start, end) of regions.
class HeapRegionRange : public StackObj {
// Inclusive start of the range.
uint _start;
// Exclusive end of the range.
uint _end;
public:
HeapRegionRange(uint start, uint end);
uint start() const { return _start; }
uint end() const { return _end; }
uint length() const { return _end - _start; }
};
// This class keeps track of the actual heap memory, auxiliary data // This class keeps track of the actual heap memory, auxiliary data
// and its metadata (i.e., HeapRegion instances) and the list of free regions. // and its metadata (i.e., HeapRegion instances) and the list of free regions.
// //
@ -91,12 +78,9 @@ class HeapRegionManager: public CHeapObj<mtGC> {
G1RegionToSpaceMapper* _cardtable_mapper; G1RegionToSpaceMapper* _cardtable_mapper;
G1RegionToSpaceMapper* _card_counts_mapper; G1RegionToSpaceMapper* _card_counts_mapper;
// Each bit in this bitmap indicates that the corresponding region is available // Keeps track of the currently committed regions in the heap. The committed regions
// for allocation. // can either be active (ready for use) or inactive (ready for uncommit).
CHeapBitMap _available_map; G1CommittedRegionMap _committed_map;
// The number of regions committed in the heap.
uint _num_committed;
// Internal only. The highest heap region +1 we allocated a HeapRegion instance for. // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
uint _allocated_heapregions_length; uint _allocated_heapregions_length;
@ -107,6 +91,9 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// Pass down commit calls to the VirtualSpace. // Pass down commit calls to the VirtualSpace.
void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL); void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
// Initialize the HeapRegions in the range and put them on the free list.
void initialize_regions(uint start, uint num_regions);
// Find a contiguous set of empty or uncommitted regions of length num_regions and return // Find a contiguous set of empty or uncommitted regions of length num_regions and return
// the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful. // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
// Start and end defines the range to seek in, policy is first-fit. // Start and end defines the range to seek in, policy is first-fit.
@ -120,10 +107,6 @@ class HeapRegionManager: public CHeapObj<mtGC> {
void assert_contiguous_range(uint start, uint num_regions) NOT_DEBUG_RETURN; void assert_contiguous_range(uint start, uint num_regions) NOT_DEBUG_RETURN;
// Finds the next sequence of unavailable regions starting at the given index. Returns the
// sequence found as a HeapRegionRange. If no regions can be found, both start and end of
// the returned range is equal to reserved_length().
HeapRegionRange find_unavailable_from_idx(uint index) const;
// Finds the next sequence of empty regions starting from start_idx, going backwards in // Finds the next sequence of empty regions starting from start_idx, going backwards in
// the heap. Returns the length of the sequence found. If this value is zero, no // the heap. Returns the length of the sequence found. If this value is zero, no
// sequence could be found, otherwise res_idx contains the start index of this range. // sequence could be found, otherwise res_idx contains the start index of this range.
@ -132,14 +115,30 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// Checks the G1MemoryNodeManager to see if this region is on the preferred node. // Checks the G1MemoryNodeManager to see if this region is on the preferred node.
bool is_on_preferred_index(uint region_index, uint preferred_node_index); bool is_on_preferred_index(uint region_index, uint preferred_node_index);
// Clear the auxiliary data structures by notifying them that the mapping has
// changed. The structures that need to be cleared will then clear. This is
// used to allow reuse regions scheduled for uncommit without uncommitting and
// then committing them.
void clear_auxiliary_data_structures(uint start, uint num_regions);
G1HeapRegionTable _regions; G1HeapRegionTable _regions;
G1RegionToSpaceMapper* _heap_mapper; G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _prev_bitmap_mapper; G1RegionToSpaceMapper* _prev_bitmap_mapper;
G1RegionToSpaceMapper* _next_bitmap_mapper; G1RegionToSpaceMapper* _next_bitmap_mapper;
FreeRegionList _free_list; FreeRegionList _free_list;
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL); void expand(uint index, uint num_regions, WorkGang* pretouch_gang = NULL);
void uncommit_regions(uint index, size_t num_regions = 1);
// G1RegionCommittedMap helpers. These functions do the work that comes with
// the state changes tracked by G1CommittedRegionMap. To make sure this is
// safe from a multi-threading point of view there are two lock protocols in
// G1RegionCommittedMap::guarantee_mt_safety_* that are enforced. The lock
// needed should have been acquired before calling these functions.
void activate_regions(uint index, uint num_regions);
void deactivate_regions(uint start, uint num_regions);
void reactivate_regions(uint start, uint num_regions);
void uncommit_regions(uint start, uint num_regions);
// Allocate a new HeapRegion for the given index. // Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrm_index); HeapRegion* new_heap_region(uint hrm_index);
@ -149,6 +148,10 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// Expand helper for cases when the regions to expand are well defined. // Expand helper for cases when the regions to expand are well defined.
void expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers); void expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers);
// Expand helper activating inactive regions rather than committing new ones.
uint expand_inactive(uint num_regions);
// Expand helper finding new regions to commit.
uint expand_any(uint num_regions, WorkGang* pretouch_workers);
#ifdef ASSERT #ifdef ASSERT
public: public:
@ -180,7 +183,7 @@ public:
inline HeapRegion* at_or_null(uint index) const; inline HeapRegion* at_or_null(uint index) const;
// Returns whether the given region is available for allocation. // Returns whether the given region is available for allocation.
bool is_available(uint region) const; inline bool is_available(uint region) const;
// Return the next region (by index) that is part of the same // Return the next region (by index) that is part of the same
// humongous object that hr is part of. // humongous object that hr is part of.
@ -233,8 +236,8 @@ public:
// Return the number of regions available (uncommitted) regions. // Return the number of regions available (uncommitted) regions.
uint available() const { return max_length() - length(); } uint available() const { return max_length() - length(); }
// Return the number of regions that have been committed in the heap. // Return the number of regions currently active and available for use.
uint length() const { return _num_committed; } uint length() const { return _committed_map.num_active(); }
// The number of regions reserved for the heap. // The number of regions reserved for the heap.
uint reserved_length() const { return (uint)_regions.length(); } uint reserved_length() const { return (uint)_regions.length(); }
@ -252,11 +255,6 @@ public:
// number of regions might be smaller than what's desired. // number of regions might be smaller than what's desired.
uint expand_by(uint num_regions, WorkGang* pretouch_workers); uint expand_by(uint num_regions, WorkGang* pretouch_workers);
// Makes sure that the regions from start to start+num_regions-1 are available
// for allocation. Returns the number of regions that were committed to achieve
// this.
uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
// Try to expand on the given node index, returning the index of the new region. // Try to expand on the given node index, returning the index of the new region.
uint expand_on_preferred_node(uint node_index); uint expand_on_preferred_node(uint node_index);
@ -282,10 +280,17 @@ public:
// Return the actual number of uncommitted regions. // Return the actual number of uncommitted regions.
uint shrink_by(uint num_regions_to_remove); uint shrink_by(uint num_regions_to_remove);
// Uncommit a number of regions starting at the specified index, which must be available, // Remove a number of regions starting at the specified index, which must be available,
// empty, and free. // empty, and free. The regions are marked inactive and can later be uncommitted.
void shrink_at(uint index, size_t num_regions); void shrink_at(uint index, size_t num_regions);
// Check if there are any inactive regions that can be uncommitted.
bool has_inactive_regions() const;
// Uncommit inactive regions. Limit the number of regions to uncommit and return
// actual number uncommitted.
uint uncommit_inactive_regions(uint limit);
void verify(); void verify();
// Do some sanity checking. // Do some sanity checking.

View file

@ -25,10 +25,15 @@
#ifndef SHARE_GC_G1_HEAPREGIONMANAGER_INLINE_HPP #ifndef SHARE_GC_G1_HEAPREGIONMANAGER_INLINE_HPP
#define SHARE_GC_G1_HEAPREGIONMANAGER_INLINE_HPP #define SHARE_GC_G1_HEAPREGIONMANAGER_INLINE_HPP
#include "gc/g1/g1CommittedRegionMap.inline.hpp"
#include "gc/g1/heapRegion.hpp" #include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.hpp" #include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.inline.hpp" #include "gc/g1/heapRegionSet.inline.hpp"
inline bool HeapRegionManager::is_available(uint region) const {
return _committed_map.active(region);
}
inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const { inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(), assert(addr < heap_end(),
"addr: " PTR_FORMAT " end: " PTR_FORMAT, p2i(addr), p2i(heap_end())); "addr: " PTR_FORMAT " end: " PTR_FORMAT, p2i(addr), p2i(heap_end()));

View file

@ -96,6 +96,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionRemSet.hpp"
#endif // INCLUDE_G1GC #endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC #if INCLUDE_PARALLELGC
@ -504,6 +505,13 @@ WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1RegionSize: G1 GC is not enabled"); THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1RegionSize: G1 GC is not enabled");
WB_END WB_END
WB_ENTRY(jboolean, WB_G1HasRegionsToUncommit(JNIEnv* env, jobject o))
if (UseG1GC) {
return G1CollectedHeap::heap()->has_uncommittable_regions();
}
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1HasRegionsToUncommit: G1 GC is not enabled");
WB_END
#endif // INCLUDE_G1GC #endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC #if INCLUDE_PARALLELGC
@ -2302,6 +2310,7 @@ static JNINativeMethod methods[] = {
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions }, {CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize }, {CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
{CC"g1StartConcMarkCycle", CC"()Z", (void*)&WB_G1StartMarkCycle }, {CC"g1StartConcMarkCycle", CC"()Z", (void*)&WB_G1StartMarkCycle },
{CC"g1HasRegionsToUncommit", CC"()Z", (void*)&WB_G1HasRegionsToUncommit},
{CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;", {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
(void*)&WB_G1AuxiliaryMemoryUsage }, (void*)&WB_G1AuxiliaryMemoryUsage },
{CC"g1ActiveMemoryNodeCount", CC"()I", (void*)&WB_G1ActiveMemoryNodeCount }, {CC"g1ActiveMemoryNodeCount", CC"()I", (void*)&WB_G1ActiveMemoryNodeCount },

View file

@ -113,6 +113,7 @@ Mutex* OopMapCacheAlloc_lock = NULL;
Mutex* FreeList_lock = NULL; Mutex* FreeList_lock = NULL;
Mutex* OldSets_lock = NULL; Mutex* OldSets_lock = NULL;
Mutex* Uncommit_lock = NULL;
Monitor* RootRegionScan_lock = NULL; Monitor* RootRegionScan_lock = NULL;
Mutex* Management_lock = NULL; Mutex* Management_lock = NULL;
@ -220,6 +221,7 @@ void mutex_init() {
def(FreeList_lock , PaddedMutex , leaf , true, _safepoint_check_never); def(FreeList_lock , PaddedMutex , leaf , true, _safepoint_check_never);
def(OldSets_lock , PaddedMutex , leaf , true, _safepoint_check_never); def(OldSets_lock , PaddedMutex , leaf , true, _safepoint_check_never);
def(Uncommit_lock , PaddedMutex , leaf + 1 , true, _safepoint_check_never);
def(RootRegionScan_lock , PaddedMonitor, leaf , true, _safepoint_check_never); def(RootRegionScan_lock , PaddedMonitor, leaf , true, _safepoint_check_never);
def(StringDedupQueue_lock , PaddedMonitor, leaf, true, _safepoint_check_never); def(StringDedupQueue_lock , PaddedMonitor, leaf, true, _safepoint_check_never);

View file

@ -108,6 +108,7 @@ extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_m
extern Mutex* FreeList_lock; // protects the free region list during safepoints extern Mutex* FreeList_lock; // protects the free region list during safepoints
extern Mutex* OldSets_lock; // protects the old region sets extern Mutex* OldSets_lock; // protects the old region sets
extern Mutex* Uncommit_lock; // protects the uncommit list when not at safepoints
extern Monitor* RootRegionScan_lock; // used to notify that the CM threads have finished scanning the IM snapshot regions extern Monitor* RootRegionScan_lock; // used to notify that the CM threads have finished scanning the IM snapshot regions
extern Mutex* Management_lock; // a lock used to serialize JVM management extern Mutex* Management_lock; // a lock used to serialize JVM management

View file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CommittedRegionMap.inline.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
// Test double for G1CommittedRegionMap that disables the multi-threading
// safety checks, so the map can be exercised from a single test thread
// outside the VM's normal lock/safepoint protocol.
class G1CommittedRegionMapSerial : public G1CommittedRegionMap {
public:
  // Number of regions tracked by the test map.
  static const uint TestRegions = 512;

  // Cross-check the cached active/inactive counts against the bitmaps.
  void verify_counts() {
    verify_active_count(0, TestRegions, num_active());
    verify_inactive_count(0, TestRegions, num_inactive());
  }

protected:
  // No-op overrides: the serial test runs on a single thread, so the
  // usual MT-safety guarantees are neither needed nor satisfiable here.
  void guarantee_mt_safety_active() const { }
  void guarantee_mt_safety_inactive() const { }
};
// Coin flip used to randomize the test walks: true for roughly half
// of the calls.
static bool mutate() {
  const int draw = os::random();
  return (draw % 2) == 0;
}
// Populate the map by activating a random subset of the test regions.
// Guarantees at least some active regions so later phases have work to do.
static void generate_random_map(G1CommittedRegionMap* map) {
  const uint num_regions = G1CommittedRegionMapSerial::TestRegions;
  for (uint region = 0; region < num_regions; region++) {
    if (!mutate()) {
      continue;
    }
    map->activate(region, region + 1);
  }
  // The random walk may have activated nothing; fall back to activating
  // the first half of the regions to have some regions to test.
  if (map->num_active() == 0) {
    map->activate(0, num_regions / 2);
  }
}
// Walk all active ranges and randomly deactivate some of them (whole or
// in part), modelling regions being scheduled for uncommit.
static void random_deactivate(G1CommittedRegionMap* map) {
  uint current_offset = 0;
  do {
    HeapRegionRange current = map->next_active_range(current_offset);
    if (mutate()) {
      if (current.length() < 5) {
        // For short ranges, deactivate whole.
        map->deactivate(current.start(), current.end());
      } else {
        // For larger ranges, deactivate half.
        map->deactivate(current.start(), current.end() - (current.length() / 2));
      }
    }
    // Continue the walk after the range just visited.
    current_offset = current.end();
  } while (current_offset != G1CommittedRegionMapSerial::TestRegions);
}
// Walk all inactive ranges and resolve each one: either reactivate it
// (making it active again) or uncommit it. After this walk no inactive
// ranges remain.
static void random_uncommit_or_reactive(G1CommittedRegionMap* map) {
  uint current_offset = 0;
  do {
    HeapRegionRange current = map->next_inactive_range(current_offset);
    // Randomly either reactivate or uncommit
    if (mutate()) {
      map->reactivate(current.start(), current.end());
    } else {
      map->uncommit(current.start(), current.end());
    }
    // Continue the walk after the range just visited.
    current_offset = current.end();
  } while (current_offset != G1CommittedRegionMapSerial::TestRegions);
}
// Walk all committable (uncommitted) ranges and randomly activate some of
// them (whole or in part), modelling the heap growing again.
static void random_activate_free(G1CommittedRegionMap* map) {
  uint current_offset = 0;
  do {
    HeapRegionRange current = map->next_committable_range(current_offset);
    // Randomly activate this committable range.
    if (mutate()) {
      if (current.length() < 5) {
        // For short ranges, activate whole.
        map->activate(current.start(), current.end());
      } else {
        // For larger ranges, activate half.
        map->activate(current.start(), current.end() - (current.length() / 2));
      }
    }
    // Continue the walk after the range just visited.
    current_offset = current.end();
  } while (current_offset != G1CommittedRegionMapSerial::TestRegions);
}
// Serially exercise the committed-region map through repeated cycles of
// deactivation, uncommit/reactivation and activation, verifying the
// cached counts against the bitmaps after each phase.
TEST(G1CommittedRegionMapTest, serial) {
  G1CommittedRegionMapSerial serial_map;
  serial_map.initialize(G1CommittedRegionMapSerial::TestRegions);

  // Activate some regions
  generate_random_map(&serial_map);

  // Work through the map and mutate it
  for (int i = 0; i < 500; i++) {
    random_deactivate(&serial_map);
    serial_map.verify_counts();

    random_uncommit_or_reactive(&serial_map);
    serial_map.verify_counts();

    random_activate_free(&serial_map);
    serial_map.verify_counts();

    // Every inactive range was either uncommitted or reactivated above,
    // so no inactive regions may remain at the end of a cycle.
    ASSERT_EQ(serial_map.num_inactive(), 0u);
  }
}

View file

@ -0,0 +1,120 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/virtualspace.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
// Shared, lazily created work gang used to run the mapper stress tasks
// with multiple concurrent workers.
class G1MapperWorkers : AllStatic {
  static WorkGang* _work_gang;
  // Create the gang on first use. The tests invoke this from a single
  // thread, so the lazy initialization needs no synchronization.
  static WorkGang* work_gang() {
    if (_work_gang == NULL) {
      _work_gang = new WorkGang("G1 Small Workers", MaxWorkers, false, false);
      _work_gang->initialize_workers();
      _work_gang->update_active_workers(MaxWorkers);
    }
    return _work_gang;
  }

public:
  // Number of worker threads; each stress task claims one region per worker.
  static const uint MaxWorkers = 4;

  // Run the given task on the gang and wait for it to complete.
  static void run_task(AbstractGangTask* task) {
    work_gang()->run_task(task);
  }
};
// Gang instance, created on first use by G1MapperWorkers::work_gang().
WorkGang* G1MapperWorkers::_work_gang = NULL;
// Gang task where each worker repeatedly commits and uncommits its own,
// uniquely claimed region index, stressing concurrent mapper updates.
class G1TestCommitUncommit : public AbstractGangTask {
  G1RegionToSpaceMapper* _mapper;
  // Next region index to hand out; each worker atomically claims one.
  uint _claim_id;
public:
  G1TestCommitUncommit(G1RegionToSpaceMapper* mapper) :
      AbstractGangTask("Stress mapper"),
      _mapper(mapper),
      _claim_id(0) { }

  void work(uint worker_id) {
    // Claim a region index unique to this worker.
    uint index = Atomic::fetch_and_add(&_claim_id, 1u);

    for (int i = 0; i < 100000; i++) {
      // Stress commit and uncommit of a single region. The same
      // will be done for multiple adjacent regions to make sure
      // we properly handle bitmap updates as well as updates for
      // regions sharing the same underlying OS page.
      _mapper->commit_regions(index);
      _mapper->uncommit_regions(index);
    }
  }
};
// Stress commit/uncommit of adjacent regions through a
// G1RegionsSmallerThanCommitSizeMapper.
TEST_VM(G1RegionToSpaceMapper, smallStressAdjacent) {
  // Fake a heap with 1m regions and create a BOT like mapper. This
  // will give a G1RegionsSmallerThanCommitSizeMapper to stress.
  uint num_regions = G1MapperWorkers::MaxWorkers;
  size_t region_size = 1*M;
  size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
  size_t page_size = os::vm_page_size();
  // Use the cached page size for the reservation as well, consistent
  // with largeStressAdjacent.
  ReservedSpace rs(size, page_size);

  G1RegionToSpaceMapper* small_mapper =
    G1RegionToSpaceMapper::create_mapper(rs,
                                         size,
                                         page_size,
                                         region_size,
                                         G1BlockOffsetTable::heap_map_factor(),
                                         mtGC);

  G1TestCommitUncommit task(small_mapper);
  G1MapperWorkers::run_task(&task);
}
// Stress commit/uncommit of adjacent regions through a
// G1RegionsLargerThanCommitSizeMapper.
TEST_VM(G1RegionToSpaceMapper, largeStressAdjacent) {
  // Fake a heap with 2m regions and create a BOT like mapper. This
  // will give a G1RegionsLargerThanCommitSizeMapper to stress.
  uint num_regions = G1MapperWorkers::MaxWorkers;
  size_t region_size = 2*M;
  size_t size = G1BlockOffsetTable::compute_size(num_regions * region_size / HeapWordSize);
  size_t page_size = os::vm_page_size();
  ReservedSpace rs(size, page_size);

  G1RegionToSpaceMapper* large_mapper =
    G1RegionToSpaceMapper::create_mapper(rs,
                                         size,
                                         page_size,
                                         region_size,
                                         G1BlockOffsetTable::heap_map_factor(),
                                         mtGC);

  G1TestCommitUncommit task(large_mapper);
  G1MapperWorkers::run_task(&task);
}

View file

@ -153,7 +153,7 @@ public class TestShrinkAuxiliaryData {
static class ShrinkAuxiliaryDataTest { static class ShrinkAuxiliaryDataTest {
public static void main(String[] args) throws IOException { public static void main(String[] args) throws Exception {
ShrinkAuxiliaryDataTest testCase = new ShrinkAuxiliaryDataTest(); ShrinkAuxiliaryDataTest testCase = new ShrinkAuxiliaryDataTest();
@ -220,7 +220,7 @@ public class TestShrinkAuxiliaryData {
private final List<GarbageObject> garbage = new ArrayList<>(); private final List<GarbageObject> garbage = new ArrayList<>();
public void test() throws IOException { public void test() throws Exception {
MemoryUsage muFull, muFree, muAuxDataFull, muAuxDataFree; MemoryUsage muFull, muFree, muAuxDataFull, muAuxDataFree;
float auxFull, auxFree; float auxFull, auxFree;
@ -242,6 +242,14 @@ public class TestShrinkAuxiliaryData {
deallocate(); deallocate();
System.gc(); System.gc();
if (WhiteBox.getWhiteBox().g1HasRegionsToUncommit()) {
System.out.println("Waiting for concurrent uncommit to complete");
do {
Thread.sleep(1000);
} while(WhiteBox.getWhiteBox().g1HasRegionsToUncommit());
System.out.println("Concurrent uncommit done");
}
muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
muAuxDataFree = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage(); muAuxDataFree = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage();

View file

@ -0,0 +1,149 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package gc.stress;
/*
 * @test TestStressG1Uncommit
* @key stress
* @summary Stress uncommitting by allocating and releasing memory
* @requires vm.gc.G1
* @library /test/lib
* @modules java.base/jdk.internal.misc
* @run driver/timeout=1300 gc.stress.TestStressG1Uncommit
*/
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import com.sun.management.ThreadMXBean;
import jdk.test.lib.Asserts;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
/**
 * Launches a child JVM running {@link StressUncommit} with region logging
 * enabled and verifies that it exits cleanly and actually logged uncommit
 * activity.
 */
public class TestStressG1Uncommit {
    public static void main(String[] args) throws Exception {
        ArrayList<String> options = new ArrayList<>();
        Collections.addAll(options,
            "-Xlog:gc,gc+heap+region=debug",
            "-XX:+UseG1GC",
            StressUncommit.class.getName()
        );
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(options);
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        // Echo the child output first so it is captured in the test log
        // even when one of the checks below fails.
        output.outputTo(System.out);
        output.shouldHaveExitValue(0);
        output.shouldMatch("Uncommit regions");
    }
}
/**
 * Worker side of the stress test: repeatedly allocates ~80% of the heap
 * from multiple threads, drops the references and triggers a GC so the
 * heap shrinks, exercising G1's concurrent region uncommit.
 */
class StressUncommit {
    private static final long M = 1024 * 1024;
    private static final long G = 1024 * M;

    private static final Instant StartTime = Instant.now();
    private static final ThreadMXBean threadBean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
    private static final MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
    // Keeps all worker allocations reachable until cleared by the main thread.
    private static ConcurrentLinkedQueue<Object> globalKeepAlive;

    public static void main(String args[]) throws InterruptedException {
        // Leave 20% head room to try to avoid Full GCs.
        long allocationSize = (long) (Runtime.getRuntime().maxMemory() * 0.8);

        // Figure out suitable number of workers (~1 per gig).
        int gigsOfAllocation = (int) Math.ceil((double) allocationSize / G);
        int numWorkers = Math.min(gigsOfAllocation, Runtime.getRuntime().availableProcessors());
        long workerAllocation = allocationSize / numWorkers;
        log("Using " + numWorkers + " workers, each allocating: ~" + (workerAllocation / M) + "M");

        ExecutorService workers = Executors.newFixedThreadPool(numWorkers);
        try {
            int iteration = 1;
            // Run for 60 seconds.
            while (uptime() < 60) {
                log("Iteration: " + iteration++);
                globalKeepAlive = new ConcurrentLinkedQueue<>();

                // Submit work to executor.
                CountDownLatch workersRunning = new CountDownLatch(numWorkers);
                for (int j = 0; j < numWorkers; j++) {
                    // Submit worker task.
                    workers.submit(() -> {
                        allocateToLimit(workerAllocation);
                        workersRunning.countDown();
                    });
                }

                // Wait for tasks to complete.
                workersRunning.await();

                // Clear the reference holding all task allocations alive.
                globalKeepAlive = null;

                // Do a GC that should shrink the heap.
                long committedBefore = memoryBean.getHeapMemoryUsage().getCommitted();
                System.gc();
                long committedAfter = memoryBean.getHeapMemoryUsage().getCommitted();
                Asserts.assertLessThan(committedAfter, committedBefore);
            }
        } finally {
            workers.shutdown();
            workers.awaitTermination(5, TimeUnit.SECONDS);
        }
    }

    // Allocate roughly `limit` bytes on the calling thread, keeping the
    // allocations reachable via globalKeepAlive.
    private static void allocateToLimit(long limit) {
        var localKeepAlive = new LinkedList<byte[]>();

        long currentAllocation = threadBean.getCurrentThreadAllocatedBytes();
        long allocationLimit = currentAllocation + limit;
        while (currentAllocation < allocationLimit) {
            // Check roughly every megabyte.
            for (long j = 0 ; j < 1000; j++) {
                localKeepAlive.add(new byte[1024]);
            }
            currentAllocation = threadBean.getCurrentThreadAllocatedBytes();
        }
        // Add to globalKeepAlive for release by main thread.
        globalKeepAlive.add(localKeepAlive);
    }

    // Seconds elapsed since the test started.
    private static long uptime() {
        return Duration.between(StartTime, Instant.now()).getSeconds();
    }

    // Timestamped stdout logging, matched against by the parent test.
    private static void log(String text) {
        System.out.println(uptime() + "s: " + text);
    }
}

View file

@ -163,6 +163,7 @@ public class WhiteBox {
// G1 // G1
public native boolean g1InConcurrentMark(); public native boolean g1InConcurrentMark();
public native boolean g1HasRegionsToUncommit();
private native boolean g1IsHumongous0(Object o); private native boolean g1IsHumongous0(Object o);
public boolean g1IsHumongous(Object o) { public boolean g1IsHumongous(Object o) {
Objects.requireNonNull(o); Objects.requireNonNull(o);