Zhengyu Gu 2013-09-27 10:08:56 -04:00
commit bfafab7b47
775 changed files with 16519 additions and 6061 deletions


@ -379,3 +379,5 @@ aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
85072013aad46050a362d10ab78e963121c8014c jdk8-b108
566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52


@ -88,7 +88,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \


@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=51
HS_BUILD_NUMBER=53
JDK_MAJOR_VER=1
JDK_MINOR_VER=8


@ -120,13 +120,13 @@ jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1
jprt.my.windows.i586.jdk8=windows_i586_6.1
jprt.my.windows.i586.jdk7=windows_i586_6.1
jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_5.2
jprt.my.windows.x64.jdk7=windows_x64_5.2
jprt.my.windows.x64.jdk8=windows_x64_6.1
jprt.my.windows.x64.jdk7=windows_x64_6.1
jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
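(For context: the 5.1/5.2 platform identifiers correspond to the Windows NT kernel versions of Windows XP and Windows Server 2003, while 6.1 corresponds to Windows 7 / Windows Server 2008 R2, so this hunk presumably retargets the JPRT Windows build and test hosts to the newer OS level.)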


@ -52,6 +52,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver,
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -125,6 +130,11 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);


@ -58,6 +58,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int i486_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -132,6 +137,11 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// add code here, bump the code stub size returned by pd_code_size_limit!
const int i486_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);


@ -49,6 +49,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread,
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int amd64_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -126,6 +131,11 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// returned by pd_code_size_limit!
const int amd64_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);


@ -4219,7 +4219,9 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
}
}
if (!PrintInlining) return;
if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
return;
}
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();

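The rationale for the widened check: inlining decisions can now be traced for a single method even when the global -XX:+PrintInlining flag is off, because has_option("PrintInlining") is satisfied by a per-method CompileCommand entry. The usual way to request that (flag syntax recalled from the CompileCommand "option" form, so treat the exact spelling as an assumption) is something like:

    -XX:CompileCommand=option,java/lang/String.indexOf,PrintInlining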

@ -341,7 +341,7 @@ Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
unsigned int hashValue_arg, bool c_heap, TRAPS) {
assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
// Don't allow symbols to be created which cannot fit in a Symbol*.
@ -685,7 +685,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
if (found_string != NULL) return found_string;
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
Handle string;


@ -160,7 +160,7 @@ address CompiledIC::stub_address() const {
// High-level access to an inline cache. Guaranteed to be MT-safe.
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@ -170,8 +170,10 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
assert(bytecode == Bytecodes::_invokeinterface, "");
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
if (entry == NULL) {
return false;
}
#ifdef ASSERT
assert(entry != NULL, "entry not computed");
int index = call_info->resolved_method()->itable_index();
assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
@ -184,6 +186,9 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
int vtable_index = call_info->vtable_index();
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
if (entry == NULL) {
return false;
}
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
}
@ -200,6 +205,7 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
return true;
}


@ -226,7 +226,10 @@ class CompiledIC: public ResourceObj {
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(CompiledICInfo& info);
void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
// Returns true if successful and false otherwise. The call can fail if memory
// allocation in the code cache fails.
bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
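A minimal caller-side sketch of the new contract (the helper name and retry policy are illustrative, not taken from this changeset): when set_to_megamorphic() returns false because a vtable/itable stub could not be allocated in the code cache, the caller keeps the current inline-cache state and simply lets a later call miss retry the transition.

// Illustrative only; not the actual HotSpot call site.
static bool try_set_megamorphic(CompiledIC* ic, CallInfo* info, Bytecodes::Code bc, TRAPS) {
  bool ok = ic->set_to_megamorphic(info, bc, THREAD);
  if (!ok) {
    // VtableStubs could not allocate a stub (code cache full); leave the IC
    // unchanged and let a subsequent miss attempt the transition again.
  }
  return ok;
}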


@ -46,12 +46,9 @@ address VtableStub::_chunk = NULL;
address VtableStub::_chunk_end = NULL;
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
static int num_vtable_chunks = 0;
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)
const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead
@ -60,7 +57,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
return NULL;
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
@ -121,6 +118,12 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
} else {
s = create_itable_stub(vtable_index);
}
// Creation of vtable or itable can fail if there is not enough free space in the code cache.
if (s == NULL) {
return NULL;
}
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
tty->print_cr("Decoding VtableStub %s[%d]@%d",

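Taken together with the platform-specific hunks above, this replaces the old fatal vm_exit_out_of_memory() with NULL propagation: VtableStub::operator new returns NULL when BufferBlob::create() fails, create_vtable_stub()/create_itable_stub() pass that NULL on, and find_stub() hands it to CompiledIC::set_to_megamorphic(), which now reports failure instead of aborting the VM. A condensed sketch of the resulting flow (simplified, not the verbatim HotSpot code):

// Simplified illustration of the new failure path in VtableStubs::find_stub().
address VtableStubs::find_stub(bool is_vtable_stub, int index) {
  VtableStub* s = is_vtable_stub ? create_vtable_stub(index)
                                 : create_itable_stub(index);
  if (s == NULL) {
    return NULL;   // code cache exhausted; the caller turns this into a soft failure
  }
  enter(is_vtable_stub, index, s);
  return s->entry_point();
}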

@ -0,0 +1,141 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#ifndef PRODUCT
void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
guarantee(_base != NULL, "Array not initialized");
guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
}
void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index < (bias() + length()),
err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
class TestMappedArray : public G1BiasedMappedArray<int> {
protected:
virtual int default_value() const { return 0xBAADBABE; }
public:
static void test_biasedarray() {
const size_t REGION_SIZE_IN_WORDS = 512;
const size_t NUM_REGIONS = 20;
HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
TestMappedArray array;
array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
REGION_SIZE_IN_WORDS * HeapWordSize);
// Check address calculation (bounds)
assert(array.bottom_address_mapped() == fake_heap,
err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
int* bottom = array.address_mapped_to(fake_heap);
assert((void*)bottom == (void*) array.base(), "must be");
int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
assert((void*)end == (void*)(array.base() + array.length()), "must be");
// The entire array should contain default value elements
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Test setting values in the table
HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
// Set/get by address tests: invert some value; first retrieve one
int actual_value = array.get_by_index(NUM_REGIONS / 2);
array.set_by_index(NUM_REGIONS / 2, ~actual_value);
// Get the same value by address, should correspond to the start of the "region"
int value = array.get_by_address(region_start_address);
assert(value == ~actual_value, "must be");
// Get the same value by address, at one HeapWord before the start
value = array.get_by_address(region_start_address - 1);
assert(value == array.default_value(), "must be");
// Get the same value by address, at the end of the "region"
value = array.get_by_address(region_end_address);
assert(value == ~actual_value, "must be");
// Make sure the next value maps to another index
value = array.get_by_address(region_end_address + 1);
assert(value == array.default_value(), "must be");
// Reset the value in the array
array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
// The entire array should have the default value again
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Set/get by index tests: invert some value
idx_t index = NUM_REGIONS / 2;
actual_value = array.get_by_index(index);
array.set_by_index(index, ~actual_value);
value = array.get_by_index(index);
assert(value == ~actual_value, "must be");
value = array.get_by_index(index - 1);
assert(value == array.default_value(), "must be");
value = array.get_by_index(index + 1);
assert(value == array.default_value(), "must be");
array.set_by_index(0, 0);
value = array.get_by_index(0);
assert(value == 0, "must be");
array.set_by_index(array.length() - 1, 0);
value = array.get_by_index(array.length() - 1);
assert(value == 0, "must be");
array.set_by_index(index, 0);
// The array should have three zeros, and default values otherwise
size_t num_zeros = 0;
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value() || *current == 0, "must be");
if (*current == 0) {
num_zeros++;
}
}
assert(num_zeros == 3, "must be");
}
};
void TestG1BiasedArray_test() {
TestMappedArray::test_biasedarray();
}
#endif
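How TestG1BiasedArray_test() gets invoked is not part of this hunk; by the convention used for other internal *_test() hooks (an assumption here, including the macro name), it would be registered with the internal VM test driver and run in debug builds via -XX:+ExecuteInternalVMTests, roughly:

// Hypothetical registration following the existing internal-test pattern.
run_unit_test(TestG1BiasedArray_test());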


@ -0,0 +1,181 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#include "utilities/debug.hpp"
#include "memory/allocation.inline.hpp"
// Implements the common base functionality for arrays that contain provisions
// for accessing its elements using a biased index.
// The element type is defined by instantiating the template.
class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
public:
typedef size_t idx_t;
protected:
address _base; // the real base address
size_t _length; // the length of the array
address _biased_base; // base address biased by "bias" elements
size_t _bias; // the bias, i.e. the offset biased_base is located to the right in elements
uint _shift_by; // the amount of bits to shift right when mapping to an index of the array.
protected:
G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
_bias(0), _shift_by(0) { }
// Allocate a new array, generic version.
static address create_new_base_array(size_t length, size_t elem_size) {
assert(length > 0, "just checking");
assert(elem_size > 0, "just checking");
return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
}
// Initialize the members of this class. The biased start address of this array
// is the bias (in elements) multiplied by the element size.
void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
assert(base != NULL, "just checking");
assert(length > 0, "just checking");
assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
_base = base;
_length = length;
_biased_base = base - (bias * elem_size);
_bias = bias;
_shift_by = shift_by;
}
// Allocate and initialize this array to cover the heap addresses in the range
// of [bottom, end).
void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
assert(mapping_granularity_in_bytes > 0, "just checking");
assert(is_power_of_2(mapping_granularity_in_bytes),
err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
mapping_granularity_in_bytes, bottom));
assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
mapping_granularity_in_bytes, end));
size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
}
size_t bias() const { return _bias; }
uint shift_by() const { return _shift_by; }
void verify_index(idx_t index) const PRODUCT_RETURN;
void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
public:
// Return the length of the array in elements.
size_t length() const { return _length; }
};
// Array that provides biased access and mapping from (valid) addresses in the
// heap into this array.
template<class T>
class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
public:
typedef G1BiasedMappedArrayBase::idx_t idx_t;
T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
// Return the element of the given array at the given index. Assume
// the index is valid. This is a convenience method that does sanity
// checking on the index.
T get_by_index(idx_t index) const {
verify_index(index);
return this->base()[index];
}
// Set the element of the given array at the given index to the
// given value. Assume the index is valid. This is a convenience
// method that does sanity checking on the index.
void set_by_index(idx_t index, T value) {
verify_index(index);
this->base()[index] = value;
}
// The raw biased base pointer.
T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
// Return the element of the given array that covers the given word in the
// heap. Assumes the index is valid.
T get_by_address(HeapWord* value) const {
idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
this->verify_biased_index(biased_index);
return biased_base()[biased_index];
}
// Set the value of the array entry that corresponds to the given array.
void set_by_address(HeapWord * address, T value) {
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
this->verify_biased_index(biased_index);
biased_base()[biased_index] = value;
}
protected:
// Returns the address of the element the given address maps to
T* address_mapped_to(HeapWord* address) {
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
this->verify_biased_index_inclusive_end(biased_index);
return biased_base() + biased_index;
}
public:
// Return the smallest address (inclusive) in the heap that this array covers.
HeapWord* bottom_address_mapped() const {
return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
}
// Return the highest address (exclusive) in the heap that this array covers.
HeapWord* end_address_mapped() const {
return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
}
protected:
virtual T default_value() const = 0;
// Set all elements of the given array to the given value.
void clear() {
T value = default_value();
for (idx_t i = 0; i < length(); i++) {
set_by_index(i, value);
}
}
public:
G1BiasedMappedArray() {}
// Allocate and initialize this array to cover the heap addresses in the range
// of [bottom, end).
void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
this->clear();
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
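The mapping arithmetic can be stated compactly: for a mapping granularity of G = 2^shift bytes covering [bottom, end), the array has (end - bottom) / G slots, the bias is bottom / G, and a heap address maps to slot (addr >> shift) - bias; storing the pre-biased base pointer (base - bias) is what lets get_by_address() index directly with the shifted value. A small self-contained sketch of the same idea in plain C++ (illustrative only, not the HotSpot class):

#include <cassert>
#include <cstdint>
#include <vector>

struct BiasedArray {
  std::vector<int> slots;   // one element per granularity-sized chunk
  uintptr_t bias;           // bottom >> shift
  unsigned  shift;          // log2 of the mapping granularity in bytes

  void initialize(uintptr_t bottom, uintptr_t end, unsigned shift_by) {
    shift = shift_by;
    bias  = bottom >> shift;
    slots.assign((end - bottom) >> shift, 0);
  }
  int& at_address(uintptr_t addr) {
    uintptr_t biased_index = addr >> shift;                            // as in get_by_address()
    assert(biased_index >= bias && biased_index < bias + slots.size());
    return slots[biased_index - bias];                                 // the real code pre-subtracts via _biased_base
  }
};

int main() {
  BiasedArray a;
  a.initialize(0x100000, 0x100000 + 8 * 4096, 12);   // 8 "regions" of 4 KiB each
  a.at_address(0x100000 + 3 * 4096 + 17) = 42;       // any address inside region 3
  return a.at_address(0x103000) == 42 ? 0 : 1;       // same region, same slot
}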


@ -2069,8 +2069,10 @@ jint G1CollectedHeap::initialize() {
_g1_storage.initialize(g1_rs, 0);
_g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
_hrs.initialize((HeapWord*) _g1_reserved.start(),
(HeapWord*) _g1_reserved.end(),
_expansion_regions);
(HeapWord*) _g1_reserved.end());
assert(_hrs.max_length() == _expansion_regions,
err_msg("max length: %u expansion regions: %u",
_hrs.max_length(), _expansion_regions));
// Do later initialization work for concurrent refinement.
_cg1r->init();


@ -71,27 +71,16 @@ uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
// Public
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
uint max_length) {
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
_length = 0;
_heap_bottom = bottom;
_heap_end = end;
_region_shift = HeapRegion::LogOfHRGrainBytes;
_next_search_index = 0;
_allocated_length = 0;
_max_length = max_length;
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
_regions_biased = _regions - ((uintx) bottom >> _region_shift);
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
"bottom should be included in the region with index 0");
_regions.initialize(bottom, end, HeapRegion::GrainBytes);
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@ -101,15 +90,15 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
assert(_heap_bottom <= next_bottom, "invariant");
assert(heap_bottom() <= next_bottom, "invariant");
while (next_bottom < new_end) {
assert(next_bottom < _heap_end, "invariant");
assert(next_bottom < heap_end(), "invariant");
uint index = length();
assert(index < _max_length, "otherwise we cannot expand further");
assert(index < max_length(), "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
assert(next_bottom == _heap_bottom, "invariant");
assert(next_bottom == heap_bottom(), "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
@ -122,8 +111,8 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
assert(_regions[index] == NULL, "invariant");
_regions[index] = new_hr;
assert(_regions.get_by_index(index) == NULL, "invariant");
_regions.set_by_index(index, new_hr);
increment_allocated_length();
}
// Have to increment the length first, otherwise we will get an
@ -228,26 +217,26 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
guarantee(_length <= _allocated_length,
guarantee(length() <= _allocated_length,
err_msg("invariant: _length: %u _allocated_length: %u",
_length, _allocated_length));
guarantee(_allocated_length <= _max_length,
length(), _allocated_length));
guarantee(_allocated_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
_allocated_length, _max_length));
guarantee(_next_search_index <= _length,
_allocated_length, max_length()));
guarantee(_next_search_index <= length(),
err_msg("invariant: _next_search_index: %u _length: %u",
_next_search_index, _length));
_next_search_index, length()));
HeapWord* prev_end = _heap_bottom;
HeapWord* prev_end = heap_bottom();
for (uint i = 0; i < _allocated_length; i += 1) {
HeapRegion* hr = _regions[i];
HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
if (i < _length) {
if (i < length()) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
@ -265,8 +254,8 @@ void HeapRegionSeq::verify_optional() {
prev_end = hr->end();
}
}
for (uint i = _allocated_length; i < _max_length; i += 1) {
guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
for (uint i = _allocated_length; i < max_length(); i += 1) {
guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
}
}
#endif // PRODUCT


@ -25,10 +25,17 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#include "gc_implementation/g1/g1BiasedArray.hpp"
class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;
class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
protected:
virtual HeapRegion* default_value() const { return NULL; }
};
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
@ -44,35 +51,21 @@ class FreeRegionList;
//
// We keep track of three lengths:
//
// * _length (returned by length()) is the number of currently
// * _committed_length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
// * _max_length (returned by max_length()) is the maximum number of
// regions the heap can have.
// * max_length() returns the maximum number of regions the heap can have.
//
// and maintain that: _length <= _allocated_length <= _max_length
// and maintain that: _committed_length <= _allocated_length <= max_length()
class HeapRegionSeq: public CHeapObj<mtGC> {
friend class VMStructs;
// The array that holds the HeapRegions.
HeapRegion** _regions;
// Version of _regions biased to address 0
HeapRegion** _regions_biased;
G1HeapRegionTable _regions;
// The number of regions committed in the heap.
uint _length;
// The address of the first reserved word in the heap.
HeapWord* _heap_bottom;
// The address of the last reserved word in the heap - 1.
HeapWord* _heap_end;
// The log of the region byte size.
uint _region_shift;
uint _committed_length;
// A hint for which index to start searching from for humongous
// allocations.
@ -81,37 +74,33 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// The number of regions for which we have allocated HeapRegions for.
uint _allocated_length;
// The maximum number of regions in the heap.
uint _max_length;
// Find a contiguous set of empty regions of length num, starting
// from the given index.
uint find_contiguous_from(uint from, uint num);
// Map a heap address to a biased region index. Assume that the
// address is valid.
inline uintx addr_to_index_biased(HeapWord* addr) const;
void increment_allocated_length() {
assert(_allocated_length < _max_length, "pre-condition");
assert(_allocated_length < max_length(), "pre-condition");
_allocated_length++;
}
void increment_length() {
assert(_length < _max_length, "pre-condition");
_length++;
assert(length() < max_length(), "pre-condition");
_committed_length++;
}
void decrement_length() {
assert(_length > 0, "pre-condition");
_length--;
assert(length() > 0, "pre-condition");
_committed_length--;
}
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
public:
// Empty contructor, we'll initialize it with the initialize() method.
HeapRegionSeq() { }
HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
void initialize(HeapWord* bottom, HeapWord* end);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@ -126,10 +115,10 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap.
uint length() const { return _length; }
uint length() const { return _committed_length; }
// Return the maximum number of regions in the heap.
uint max_length() const { return _max_length; }
uint max_length() const { return (uint)_regions.length(); }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use


@ -28,28 +28,16 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
uintx index = (uintx) addr >> _region_shift;
return index;
}
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
uintx index_biased = addr_to_index_biased(addr);
HeapRegion* hr = _regions_biased[index_biased];
HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
if (addr != NULL && addr < _heap_end) {
assert(addr >= _heap_bottom,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
if (addr != NULL && addr < heap_end()) {
assert(addr >= heap_bottom(),
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
return addr_to_region_unsafe(addr);
}
return NULL;
@ -57,7 +45,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
HeapRegion* hr = _regions[index];
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;


@ -34,8 +34,14 @@
static_field(HeapRegion, GrainBytes, size_t) \
static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
nonstatic_field(HeapRegionSeq, _length, uint) \
nonstatic_field(G1HeapRegionTable, _base, address) \
nonstatic_field(G1HeapRegionTable, _length, size_t) \
nonstatic_field(G1HeapRegionTable, _biased_base, address) \
nonstatic_field(G1HeapRegionTable, _bias, size_t) \
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionSeq, _committed_length, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@ -58,6 +64,8 @@
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
\
declare_toplevel_type(G1HeapRegionTable) \
\
declare_type(G1CollectedHeap, SharedHeap) \
\
declare_type(HeapRegion, ContiguousSpace) \


@ -122,7 +122,7 @@ void GC_locker::jni_unlock(JavaThread* thread) {
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
if (!is_active()) {
if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint


@ -88,7 +88,7 @@ class GC_locker: public AllStatic {
public:
// Accessors
static bool is_active() {
assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
return is_active_internal();
}
static bool needs_gc() { return _needs_gc; }
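These two hunks tighten the safepoint assumption: is_active() now insists on being read at a safepoint, while jni_unlock(), which runs outside a safepoint under JNICritical_lock, switches to the unchecked is_active_internal(). The shape of the accessor split looks roughly like this (the body of is_active_internal() is an assumption, not shown in the hunk):

// Sketch of the split: a checked accessor for safepoint-time readers and an
// unchecked one for callers that hold the relevant lock outside a safepoint.
static bool is_active_internal() {
  return _jni_lock_count > 0;                       // assumed implementation
}
static bool is_active() {
  assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
  return is_active_internal();
}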


@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
@ -111,7 +112,7 @@ typedef class FreeList<Metachunk> ChunkList;
// Has three lists of free chunks, and a total size and
// count that includes all three
class ChunkManager VALUE_OBJ_CLASS_SPEC {
class ChunkManager : public CHeapObj<mtInternal> {
// Free list of chunks of different sizes.
// SpecializedChunk
@ -158,7 +159,12 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
public:
ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
: _free_chunks_total(0), _free_chunks_count(0) {
_free_chunks[SpecializedIndex].set_size(specialized_size);
_free_chunks[SmallIndex].set_size(small_size);
_free_chunks[MediumIndex].set_size(medium_size);
}
// add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size);
@ -219,7 +225,7 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
void locked_print_free_chunks(outputStream* st);
void locked_print_sum_free_chunks(outputStream* st);
void print_on(outputStream* st);
void print_on(outputStream* st) const;
};
// Used to manage the free list of Metablocks (a block corresponds
@ -276,11 +282,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// VirtualSpace
Metachunk* first_chunk() { return (Metachunk*) bottom(); }
void inc_container_count();
#ifdef ASSERT
uint container_count_slow();
#endif
public:
VirtualSpaceNode(size_t byte_size);
@ -314,8 +315,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
void inc_top(size_t word_size) { _top += word_size; }
uintx container_count() { return _container_count; }
void inc_container_count();
void dec_container_count();
#ifdef ASSERT
uint container_count_slow();
void verify_container_count();
#endif
@ -421,8 +424,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
VirtualSpaceNode* _virtual_space_list;
// virtual space currently being used for allocations
VirtualSpaceNode* _current_virtual_space;
// Free chunk list for all other metadata
ChunkManager _chunk_manager;
// Can this virtual list allocate >1 spaces? Also, used to determine
// whether to allocate unlimited small chunks in this virtual space
@ -475,7 +476,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
return _current_virtual_space;
}
ChunkManager* chunk_manager() { return &_chunk_manager; }
bool is_class() const { return _is_class; }
// Allocate the first virtualspace.
@ -494,14 +494,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
void dec_virtual_space_count();
// Unlink empty VirtualSpaceNodes and free it.
void purge();
// Used and capacity in the entire list of virtual spaces.
// These are global values shared by all Metaspaces
size_t capacity_words_sum();
size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
size_t used_words_sum();
size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
void purge(ChunkManager* chunk_manager);
bool contains(const void *ptr);
@ -582,18 +575,12 @@ class SpaceManager : public CHeapObj<mtClass> {
// Type of metadata allocated.
Metaspace::MetadataType _mdtype;
// Chunk related size
size_t _medium_chunk_bunch;
// List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating
// chunks when the SpaceManager is freed.
Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk;
// Virtual space where allocation comes from.
VirtualSpaceList* _vs_list;
// Number of small chunks to allocate to a manager
// If class space manager, small chunks are unlimited
static uint const _small_chunk_limit;
@ -626,7 +613,9 @@ class SpaceManager : public CHeapObj<mtClass> {
}
Metaspace::MetadataType mdtype() { return _mdtype; }
VirtualSpaceList* vs_list() const { return _vs_list; }
VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
Metachunk* current_chunk() const { return _current_chunk; }
void set_current_chunk(Metachunk* v) {
@ -648,18 +637,19 @@ class SpaceManager : public CHeapObj<mtClass> {
public:
SpaceManager(Metaspace::MetadataType mdtype,
Mutex* lock,
VirtualSpaceList* vs_list);
Mutex* lock);
~SpaceManager();
enum ChunkMultiples {
MediumChunkMultiple = 4
};
bool is_class() { return _mdtype == Metaspace::ClassType; }
// Accessors
size_t specialized_chunk_size() { return SpecializedChunk; }
size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
size_t allocated_blocks_words() const { return _allocated_blocks_words; }
@ -762,7 +752,7 @@ void VirtualSpaceNode::inc_container_count() {
_container_count++;
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
"container_count_slow() " SIZE_FORMAT,
" container_count_slow() " SIZE_FORMAT,
_container_count, container_count_slow()));
}
@ -775,7 +765,7 @@ void VirtualSpaceNode::dec_container_count() {
void VirtualSpaceNode::verify_container_count() {
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
"container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
" container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif
@ -1020,7 +1010,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge() {
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
@ -1042,7 +1032,7 @@ void VirtualSpaceList::purge() {
prev_vsl->set_next(vsl->next());
}
vsl->purge(chunk_manager());
vsl->purge(chunk_manager);
dec_reserved_words(vsl->reserved_words());
dec_committed_words(vsl->committed_words());
dec_virtual_space_count();
@ -1064,36 +1054,6 @@ void VirtualSpaceList::purge() {
#endif
}
size_t VirtualSpaceList::used_words_sum() {
size_t allocated_by_vs = 0;
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
// Sum used region [bottom, top) in each virtualspace
allocated_by_vs += vsl->used_words_in_vs();
}
assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
err_msg("Total in free chunks " SIZE_FORMAT
" greater than total from virtual_spaces " SIZE_FORMAT,
allocated_by_vs, chunk_manager()->free_chunks_total_words()));
size_t used =
allocated_by_vs - chunk_manager()->free_chunks_total_words();
return used;
}
// Space available in all MetadataVirtualspaces allocated
// for metadata. This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
size_t capacity = 0;
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
capacity += vsl->capacity_words_in_vs();
}
return capacity;
}
VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
_is_class(false),
_virtual_space_list(NULL),
@ -1104,10 +1064,6 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
bool initialization_succeeded = grow_vs(word_size);
_chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
_chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
_chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
assert(initialization_succeeded,
" VirtualSpaceList initialization should not fail");
}
@ -1123,9 +1079,6 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
bool succeeded = class_entry->initialize();
_chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
_chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
_chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
assert(succeeded, " VirtualSpaceList initialization should not fail");
link_vs(class_entry);
}
@ -1142,7 +1095,7 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
}
// Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord;
assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
// Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@ -1195,15 +1148,8 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words,
size_t medium_chunk_bunch) {
// Get a chunk from the chunk freelist
Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
if (next != NULL) {
next->container()->inc_container_count();
} else {
// Allocate a chunk out of the current virtual space.
next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
}
// Allocate a chunk out of the current virtual space.
Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
if (next == NULL) {
// Not enough room in current virtual space. Try to commit
@ -1221,12 +1167,14 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
// being used for CompressedHeaders, don't allocate a new virtualspace.
if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
// Get another virtual space.
size_t grow_vs_words =
MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
size_t allocation_aligned_expand_words =
align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
size_t grow_vs_words =
MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it.
assert(current_virtual_space()->expanded_words() == 0,
"New virtuals space nodes should not have expanded");
"New virtual space nodes should not have expanded");
size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
page_size_words);
@ -1342,8 +1290,9 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
// reserved space, because this is a larger space prereserved for compressed
// class pointers.
if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
size_t real_allocated = Metaspace::space_list()->reserved_words() +
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
size_t real_allocated = nonclass_allocated + class_allocated;
if (real_allocated >= MaxMetaspaceSize) {
return false;
}
@ -1536,15 +1485,15 @@ void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
if (dummy_chunk == NULL) {
break;
}
vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
sm->sum_count_in_chunks_in_use());
dummy_chunk->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" Free chunks total %d count %d",
vsl->chunk_manager()->free_chunks_total_words(),
vsl->chunk_manager()->free_chunks_count());
sm->chunk_manager()->free_chunks_total_words(),
sm->chunk_manager()->free_chunks_count());
}
}
} else {
@ -1796,6 +1745,8 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
// work.
chunk->set_is_free(false);
#endif
chunk->container()->inc_container_count();
slow_locked_verify();
return chunk;
}
@ -1830,9 +1781,9 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
return chunk;
}
void ChunkManager::print_on(outputStream* out) {
void ChunkManager::print_on(outputStream* out) const {
if (PrintFLSStatistics != 0) {
humongous_dictionary()->report_statistics();
const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
}
}
@ -1979,8 +1930,8 @@ void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
}
}
vs_list()->chunk_manager()->locked_print_free_chunks(st);
vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
chunk_manager()->locked_print_free_chunks(st);
chunk_manager()->locked_print_sum_free_chunks(st);
}
size_t SpaceManager::calc_chunk_size(size_t word_size) {
@ -2084,9 +2035,7 @@ void SpaceManager::print_on(outputStream* st) const {
}
SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
Mutex* lock,
VirtualSpaceList* vs_list) :
_vs_list(vs_list),
Mutex* lock) :
_mdtype(mdtype),
_allocated_blocks_words(0),
_allocated_chunks_words(0),
@ -2172,9 +2121,7 @@ SpaceManager::~SpaceManager() {
MutexLockerEx fcl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
ChunkManager* chunk_manager = vs_list()->chunk_manager();
chunk_manager->slow_locked_verify();
chunk_manager()->slow_locked_verify();
dec_total_from_size_metrics();
@ -2188,8 +2135,8 @@ SpaceManager::~SpaceManager() {
// Have to update before the chunks_in_use lists are emptied
// below.
chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
sum_count_in_chunks_in_use());
chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
sum_count_in_chunks_in_use());
// Add all the chunks in use by this space manager
// to the global list of free chunks.
@ -2204,11 +2151,11 @@ SpaceManager::~SpaceManager() {
chunk_size_name(i));
}
Metachunk* chunks = chunks_in_use(i);
chunk_manager->return_chunks(i, chunks);
chunk_manager()->return_chunks(i, chunks);
set_chunks_in_use(i, NULL);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("updated freelist count %d %s",
chunk_manager->free_chunks(i)->count(),
chunk_manager()->free_chunks(i)->count(),
chunk_size_name(i));
}
assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@ -2245,16 +2192,16 @@ SpaceManager::~SpaceManager() {
humongous_chunks->word_size(), HumongousChunkGranularity));
Metachunk* next_humongous_chunks = humongous_chunks->next();
humongous_chunks->container()->dec_container_count();
chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
humongous_chunks = next_humongous_chunks;
}
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("updated dictionary count %d %s",
chunk_manager->humongous_dictionary()->total_count(),
chunk_manager()->humongous_dictionary()->total_count(),
chunk_size_name(HumongousIndex));
}
chunk_manager->slow_locked_verify();
chunk_manager()->slow_locked_verify();
}
const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@ -2343,9 +2290,7 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
sum_count_in_chunks_in_use());
new_chunk->print_on(gclog_or_tty);
if (vs_list() != NULL) {
vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
}
chunk_manager()->locked_print_free_chunks(gclog_or_tty);
}
}
@ -2361,10 +2306,14 @@ void SpaceManager::retire_current_chunk() {
Metachunk* SpaceManager::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words) {
// Get a chunk from the chunk freelist
Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
Metachunk* next = vs_list()->get_new_chunk(word_size,
grow_chunks_by_words,
medium_chunk_bunch());
if (next == NULL) {
next = vs_list()->get_new_chunk(word_size,
grow_chunks_by_words,
medium_chunk_bunch());
}
if (TraceMetadataHumongousAllocation && next != NULL &&
SpaceManager::is_humongous(next->word_size())) {
@ -2644,13 +2593,12 @@ size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
if (list == NULL) {
ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
if (chunk_manager == NULL) {
return 0;
}
ChunkManager* chunk = list->chunk_manager();
chunk->slow_verify();
return chunk->free_chunks_total_words();
chunk_manager->slow_verify();
return chunk_manager->free_chunks_total_words();
}
size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
@ -2801,9 +2749,9 @@ void MetaspaceAux::dump(outputStream* out) {
}
void MetaspaceAux::verify_free_chunks() {
Metaspace::space_list()->chunk_manager()->verify();
Metaspace::chunk_manager_metadata()->verify();
if (Metaspace::using_class_space()) {
Metaspace::class_space_list()->chunk_manager()->verify();
Metaspace::chunk_manager_class()->verify();
}
}
@ -2874,6 +2822,9 @@ Metaspace::~Metaspace() {
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;
ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;
#define VIRTUALSPACEMULTIPLIER 2
#ifdef _LP64
@ -2981,6 +2932,7 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
_chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
}
#endif
@ -3006,6 +2958,7 @@ void Metaspace::global_initialize() {
// remainder is the misc code and data chunks.
cds_total = FileMapInfo::shared_spaces_size();
_space_list = new VirtualSpaceList(cds_total/wordSize);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
#ifdef _LP64
// Set the compressed klass pointer base so that decoding of these pointers works
@ -3073,15 +3026,30 @@ void Metaspace::global_initialize() {
size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
}
}
Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
size_t chunk_word_size,
size_t chunk_bunch) {
// Get a chunk from the chunk freelist
Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
if (chunk != NULL) {
return chunk;
}
return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
}
void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
assert(space_list() != NULL,
"Metadata VirtualSpaceList has not been initialized");
assert(chunk_manager_metadata() != NULL,
"Metadata ChunkManager has not been initialized");
_vsm = new SpaceManager(NonClassType, lock, space_list());
_vsm = new SpaceManager(NonClassType, lock);
if (_vsm == NULL) {
return;
}
@ -3090,11 +3058,13 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
if (using_class_space()) {
assert(class_space_list() != NULL,
"Class VirtualSpaceList has not been initialized");
assert(class_space_list() != NULL,
"Class VirtualSpaceList has not been initialized");
assert(chunk_manager_class() != NULL,
"Class ChunkManager has not been initialized");
// Allocate SpaceManager for classes.
_class_vsm = new SpaceManager(ClassType, lock, class_space_list());
_class_vsm = new SpaceManager(ClassType, lock);
if (_class_vsm == NULL) {
return;
}
@ -3103,9 +3073,9 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
// Allocate chunk for metadata objects
Metachunk* new_chunk =
space_list()->get_initialization_chunk(word_size,
vsm()->medium_chunk_bunch());
Metachunk* new_chunk = get_initialization_chunk(NonClassType,
word_size,
vsm()->medium_chunk_bunch());
assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
if (new_chunk != NULL) {
// Add to this manager's list of chunks in use and current_chunk().
@ -3114,9 +3084,9 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
// Allocate chunk for class metadata objects
if (using_class_space()) {
Metachunk* class_chunk =
class_space_list()->get_initialization_chunk(class_word_size,
class_vsm()->medium_chunk_bunch());
Metachunk* class_chunk = get_initialization_chunk(ClassType,
class_word_size,
class_vsm()->medium_chunk_bunch());
if (class_chunk != NULL) {
class_vsm()->add_chunk(class_chunk, true);
}
@ -3333,12 +3303,16 @@ void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
}
}
void Metaspace::purge(MetadataType mdtype) {
get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}
void Metaspace::purge() {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
space_list()->purge();
purge(NonClassType);
if (using_class_space()) {
class_space_list()->purge();
purge(ClassType);
}
}
@ -3385,7 +3359,7 @@ void Metaspace::dump(outputStream* const out) const {
#ifndef PRODUCT
class MetaspaceAuxTest : AllStatic {
class TestMetaspaceAuxTest : AllStatic {
public:
static void test_reserved() {
size_t reserved = MetaspaceAux::reserved_bytes();
@ -3425,14 +3399,25 @@ class MetaspaceAuxTest : AllStatic {
}
}
static void test_virtual_space_list_large_chunk() {
VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
// Use a size larger than VirtualSpaceSize (256k) and add one page so that the
// result is _not_ vm_allocation_granularity aligned on Windows.
size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
large_size += (os::vm_page_size()/BytesPerWord);
vs_list->get_new_chunk(large_size, large_size, 0);
}
static void test() {
test_reserved();
test_committed();
test_virtual_space_list_large_chunk();
}
};
void MetaspaceAux_test() {
MetaspaceAuxTest::test();
void TestMetaspaceAux_test() {
TestMetaspaceAuxTest::test();
}
#endif
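
The size arithmetic in test_virtual_space_list_large_chunk above is easier to follow with concrete numbers. The worked example below assumes a 64-bit VM (8-byte words), 4 KiB pages and the usual 64 KiB Windows allocation granularity; none of these constants are taken from the VM itself:

#include <cstdio>
#include <cstddef>

// Worked example of the large-chunk size computation, under the assumptions above.
int main() {
  const std::size_t K = 1024;
  const std::size_t BytesPerWord = 8;
  const std::size_t page_bytes   = 4 * K;
  const std::size_t granularity  = 64 * K;

  std::size_t large_size = 2 * 256 * K + (page_bytes / BytesPerWord);  // 524288 + 512 words
  large_size += page_bytes / BytesPerWord;                             // + another 512 words

  std::size_t bytes = large_size * BytesPerWord;                       // 4 MiB + 8 KiB
  std::printf("%zu words = %zu bytes, %zu-byte aligned: %s\n",
              large_size, bytes, granularity,
              (bytes % granularity == 0) ? "yes" : "no");              // prints "no"
  return 0;
}
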

View file

@@ -56,12 +56,15 @@
// +-------------------+
//
class ChunkManager;
class ClassLoaderData;
class Metablock;
class Metachunk;
class MetaWord;
class Mutex;
class outputStream;
class SpaceManager;
class VirtualSpaceList;
// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager. Allocations are done
@@ -76,8 +79,6 @@ class SpaceManager;
// allocate() method returns a block for use as a
// quantum of metadata.
class VirtualSpaceList;
class Metaspace : public CHeapObj<mtClass> {
friend class VMStructs;
friend class SpaceManager;
@@ -102,6 +103,10 @@ class Metaspace : public CHeapObj<mtClass> {
private:
void initialize(Mutex* lock, MetaspaceType type);
Metachunk* get_initialization_chunk(MetadataType mdtype,
size_t chunk_word_size,
size_t chunk_bunch);
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
@@ -134,6 +139,10 @@ class Metaspace : public CHeapObj<mtClass> {
static VirtualSpaceList* _space_list;
static VirtualSpaceList* _class_space_list;
static ChunkManager* _chunk_manager_metadata;
static ChunkManager* _chunk_manager_class;
public:
static VirtualSpaceList* space_list() { return _space_list; }
static VirtualSpaceList* class_space_list() { return _class_space_list; }
static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +150,14 @@ class Metaspace : public CHeapObj<mtClass> {
return mdtype == ClassType ? class_space_list() : space_list();
}
static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
static ChunkManager* chunk_manager_class() { return _chunk_manager_class; }
static ChunkManager* get_chunk_manager(MetadataType mdtype) {
assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
}
private:
// This is used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -199,6 +216,7 @@ class Metaspace : public CHeapObj<mtClass> {
void dump(outputStream* const out) const;
// Free empty virtualspaces
static void purge(MetadataType mdtype);
static void purge();
void print_on(outputStream* st) const;

View file

@@ -123,7 +123,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// Allows targeted inlining
if(callee_method->should_inline()) {
*wci_result = *(WarmCallInfo::always_hot());
if (PrintInlining && Verbose) {
if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
@@ -137,7 +137,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
size < InlineThrowMaxSize ) {
wci_result->set_profit(wci_result->profit() * 100);
if (PrintInlining && Verbose) {
if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
}
@@ -491,7 +491,7 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
C->log()->inline_fail(inline_msg);
}
}
if (PrintInlining) {
if (C->print_inlining()) {
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
if (Verbose && callee_method) {
@@ -540,7 +540,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
&& (PrintOpto || PrintOptoInlining || PrintInlining)) {
&& (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
@@ -617,7 +617,7 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J
callee_method->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem
}
if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr(" \\-> discounting inline depth");
}
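
The recurring edit in this file, PrintInlining becoming C->print_inlining(), moves the decision from a global flag to a value cached once per compilation, which is what lets a per-method option switch it on (see the set_print_inlining(...) call in compile.cpp below). A minimal sketch of that caching pattern, with illustrative names only:

#include <cstring>

// Sketch: fold a global flag and a per-method option into one per-compilation
// boolean, and query only the cached value afterwards. Names are illustrative.
static bool GlobalPrintInlining = false;

struct MethodInfo {
  const char* options;  // e.g. "PrintInlining", set by a per-method directive
  bool has_option(const char* opt) const {
    return options != NULL && std::strstr(options, opt) != NULL;
  }
};

class CompilationTask {
  bool _print_inlining;
public:
  explicit CompilationTask(const MethodInfo& m)
    : _print_inlining(GlobalPrintInlining || m.has_option("PrintInlining")) {}
  // Every later check reads the cached, per-compilation decision.
  bool print_inlining() const { return _print_inlining; }
};
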

View file

@@ -159,8 +159,9 @@ class CallGenerator : public ResourceObj {
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
if (PrintInlining)
if (C->print_inlining()) {
C->print_inlining(callee, inline_level, bci, msg);
}
}
};

View file

@@ -654,7 +654,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
_print_inlining(0) {
_print_inlining_idx(0) {
C = this;
CompileWrapper cw(this);
@@ -679,6 +679,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
set_print_assembly(print_opto_assembly);
set_parsed_irreducible_loop(false);
#endif
set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
@@ -710,7 +712,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
if (print_inlining() || print_intrinsics()) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
@@ -937,7 +939,7 @@ Compile::Compile( ciEnv* ci_env,
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
_print_inlining(0) {
_print_inlining_idx(0) {
C = this;
#ifndef PRODUCT
@@ -3611,7 +3613,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
}
void Compile::dump_inlining() {
if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
if (print_inlining() || print_intrinsics()) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3635,7 +3637,7 @@ void Compile::dump_inlining() {
}
}
for (int i = 0; i < _print_inlining_list->length(); i++) {
tty->print(_print_inlining_list->at(i).ss()->as_string());
tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
}
}
}
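
Note that _print_inlining_list is now allocated under the same print_inlining() || print_intrinsics() predicate that dump_inlining() and the individual print sites check, so the list exists exactly when something may write to it. A small sketch of keeping allocation and use behind one cached predicate (simplified types, not the Compile class):

#include <cstddef>
#include <string>
#include <vector>

// Sketch only: one cached decision guards both the allocation and every use.
class Recorder {
  bool _enabled;
  std::vector<std::string>* _messages;  // allocated only when enabled
public:
  explicit Recorder(bool enabled)
    : _enabled(enabled),
      _messages(enabled ? new std::vector<std::string>() : NULL) {}
  ~Recorder() { delete _messages; }

  bool enabled() const { return _enabled; }

  void record(const std::string& msg) {
    if (_enabled) {                     // same predicate as the allocation above
      _messages->push_back(msg);
    }
  }

  std::size_t count() const { return _enabled ? _messages->size() : 0; }
};
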

View file

@@ -312,6 +312,8 @@ class Compile : public Phase {
bool _do_method_data_update; // True if we generate code to update MethodData*s
int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
bool _print_assembly; // True if we should dump assembly code for this compilation
bool _print_inlining; // True if we should print inlining for this compilation
bool _print_intrinsics; // True if we should print intrinsics for this compilation
#ifndef PRODUCT
bool _trace_opto_output;
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -414,7 +416,7 @@ class Compile : public Phase {
};
GrowableArray<PrintInliningBuffer>* _print_inlining_list;
int _print_inlining;
int _print_inlining_idx;
// Only keep nodes in the expensive node list that need to be optimized
void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -426,24 +428,24 @@ class Compile : public Phase {
public:
outputStream* print_inlining_stream() const {
return _print_inlining_list->at(_print_inlining).ss();
return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
}
void print_inlining_skip(CallGenerator* cg) {
if (PrintInlining) {
_print_inlining_list->at(_print_inlining).set_cg(cg);
_print_inlining++;
_print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
if (_print_inlining) {
_print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
_print_inlining_idx++;
_print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
}
}
void print_inlining_insert(CallGenerator* cg) {
if (PrintInlining) {
if (_print_inlining) {
for (int i = 0; i < _print_inlining_list->length(); i++) {
if (_print_inlining_list->at(i).cg() == cg) {
if (_print_inlining_list->adr_at(i)->cg() == cg) {
_print_inlining_list->insert_before(i+1, PrintInliningBuffer());
_print_inlining = i+1;
_print_inlining_list->at(i).set_cg(NULL);
_print_inlining_idx = i+1;
_print_inlining_list->adr_at(i)->set_cg(NULL);
return;
}
}
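
The change from at(i) to adr_at(i) in these hunks points at the difference between working on a copy of a PrintInliningBuffer and on the element stored in the list; that at() hands back the element by value here is an assumption drawn from the switch itself. With an element that owns mutable state, writes to a copy are simply lost, as the stand-in types below illustrate:

#include <cstdio>
#include <string>
#include <vector>

// Simplified stand-in for a growable array of buffers carrying mutable state.
// at() returns a copy; adr_at() returns a pointer to the stored element.
struct Buffer { std::string text; };

struct Growable {
  std::vector<Buffer> elems;
  Buffer  at(int i) const { return elems[i]; }    // copy: writes are lost
  Buffer* adr_at(int i)   { return &elems[i]; }   // pointer: writes stick
};

int main() {
  Growable g;
  g.elems.push_back(Buffer());

  g.at(0).text += "lost";        // modifies a temporary copy
  g.adr_at(0)->text += "kept";   // modifies the stored element

  std::printf("stored text: '%s'\n", g.elems[0].text.c_str());  // prints 'kept'
  return 0;
}
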
@@ -572,6 +574,10 @@ class Compile : public Phase {
int AliasLevel() const { return _AliasLevel; }
bool print_assembly() const { return _print_assembly; }
void set_print_assembly(bool z) { _print_assembly = z; }
bool print_inlining() const { return _print_inlining; }
void set_print_inlining(bool z) { _print_inlining = z; }
bool print_intrinsics() const { return _print_intrinsics; }
void set_print_intrinsics(bool z) { _print_intrinsics = z; }
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);

View file

@@ -41,9 +41,9 @@
#include "runtime/sharedRuntime.hpp"
void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
if (TraceTypeProfile || C->print_inlining()) {
outputStream* out = tty;
if (!PrintInlining) {
if (!C->print_inlining()) {
if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
method->print_short_name();
tty->cr();

View file

@@ -543,7 +543,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
Compile* C = kit.C;
int nodes = C->unique();
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Intrinsic %s", str);
@@ -554,7 +554,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
// Try to inline the intrinsic.
if (kit.try_to_inline()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +570,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
}
// The intrinsic bailed out
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +592,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
int nodes = C->unique();
#ifndef PRODUCT
assert(is_predicted(), "sanity");
if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +603,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
Node* slow_ctl = kit.try_to_predicate();
if (!kit.failing()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +617,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
}
// The intrinsic bailed out
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = "failed to generate predicate for intrinsic";
@@ -2299,7 +2299,7 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_
const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
#ifndef PRODUCT
if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
if (C->print_intrinsics() || C->print_inlining()) {
tty->print(" from base type: "); adr_type->dump();
tty->print(" sharpened value: "); tjp->dump();
}
@@ -3260,7 +3260,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
if (mirror_con == NULL) return false; // cannot happen?
#ifndef PRODUCT
if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
if (C->print_intrinsics() || C->print_inlining()) {
ciType* k = mirror_con->java_mirror_type();
if (k) {
tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3952,14 +3952,14 @@ bool LibraryCallKit::inline_native_getClass() {
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
}
#endif
if (!jvms()->has_method()) {
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because intrinsic was inlined at top level");
}
#endif
@@ -3983,7 +3983,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
// Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
if (!m->caller_sensitive()) {
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
}
#endif
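
The bail-out messages in this intrinsic (continued in the hunks below) trace the frame walk that getCallerClass performs: the first frames are required to be caller sensitive, an ordinary frame further out becomes the answer, and running out of frames is reported as exceeding the available depth. A rough, stand-alone illustration of that walk; the Frame type and the exact numbering here are simplified assumptions:

#include <cstdio>
#include <vector>

// Illustration only; not the HotSpot JVMState/frame machinery.
struct Frame { const char* method; bool caller_sensitive; };

const char* find_caller(const std::vector<Frame>& frames, bool verbose) {
  for (std::size_t n = 0; n < frames.size(); n++) {
    if (n <= 1) {
      // The first frames are expected to be caller sensitive.
      if (!frames[n].caller_sensitive) {
        if (verbose) std::printf("  Bailing out: CallerSensitive expected at frame %zu\n", n);
        return NULL;
      }
    } else {
      if (verbose) std::printf("  Succeeded: caller = %s (frame %zu)\n", frames[n].method, n);
      return frames[n].method;
    }
  }
  if (verbose) std::printf("  Bailing out: caller depth exceeded available frames\n");
  return NULL;
}
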
@@ -3999,7 +3999,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
set_result(makecon(TypeInstPtr::make(caller_mirror)));
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4015,7 +4015,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
}
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {

View file

@@ -5046,7 +5046,10 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
void MetaspaceAux_test();
void TestMetaspaceAux_test();
#if INCLUDE_ALL_GCS
void TestG1BiasedArray_test();
#endif
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
@@ -5054,7 +5057,7 @@ void execute_internal_vm_tests() {
run_unit_test(TestReservedSpace_test());
run_unit_test(TestReserveMemorySpecial_test());
run_unit_test(TestVirtualSpace_test());
run_unit_test(MetaspaceAux_test());
run_unit_test(TestMetaspaceAux_test());
run_unit_test(GlobalDefinitions::test_globals());
run_unit_test(GCTimerAllTest::all());
run_unit_test(arrayOopDesc::test_max_array_length());
@@ -5066,6 +5069,7 @@ void execute_internal_vm_tests() {
run_unit_test(VMStructs::test());
#endif
#if INCLUDE_ALL_GCS
run_unit_test(TestG1BiasedArray_test());
run_unit_test(HeapRegionRemSet::test_prt());
#endif
tty->print_cr("All internal VM tests passed");

View file

@@ -1506,8 +1506,11 @@ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
info, CHECK_(methodHandle()));
inline_cache->set_to_monomorphic(info);
} else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
// Change to megamorphic
inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
// Potential change to megamorphic
bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
if (!successful) {
inline_cache->set_to_clean();
}
} else {
// Either clean or megamorphic
}

View file

@@ -78,7 +78,13 @@ class Abstract_VM_Version: AllStatic {
static const char* jre_release_version();
// does HW support an 8-byte compare-exchange operation?
static bool supports_cx8() {return _supports_cx8;}
static bool supports_cx8() {
#ifdef SUPPORTS_NATIVE_CX8
return true;
#else
return _supports_cx8;
#endif
}
// does HW support atomic get-and-set or atomic get-and-add? Used
// to guide intrinsification decisions for Unsafe atomic ops
static bool supports_atomic_getset4() {return _supports_atomic_getset4;}
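
The supports_cx8() change lets a platform that always provides an 8-byte compare-exchange say so at compile time via SUPPORTS_NATIVE_CX8, so the query folds to a constant instead of reading the runtime-probed flag. A compile-time/runtime capability split of this kind might look as follows; the macro name below is hypothetical, not the HotSpot one:

// Illustrative capability query: compile-time answer when the platform
// guarantees the feature, runtime-probed flag otherwise.
static bool probed_supports_cx8 = false;  // would be filled in by CPU feature detection

inline bool supports_cx8_sketch() {
#ifdef PLATFORM_ALWAYS_HAS_CX8            // hypothetical build-time define
  return true;                            // constant-folds; the probe is never consulted
#else
  return probed_supports_cx8;
#endif
}
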

View file

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8022585
* @summary VM crashes when run with -XX:+PrintInlining
* @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
*
*/
public class PrintInlining {
public static void main(String[] args) {
System.out.println("Passed");
}
}