Nils Eliasson 2015-11-16 20:56:18 +01:00
commit c2892b5dce
16 changed files with 334 additions and 106 deletions

src/cpu/sparc/vm/vm_version_sparc.cpp

@@ -229,35 +229,35 @@ void VM_Version::initialize() {
// SPARC T4 and above should have support for AES instructions
if (has_aes()) {
if (UseVIS > 2) { // AES intrinsics use MOVxTOd/MOVdTOx which are VIS3
if (FLAG_IS_DEFAULT(UseAES)) {
FLAG_SET_DEFAULT(UseAES, true);
}
if (!UseAES) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
} else {
// The AES intrinsic stubs require AES instruction support (of course)
// but also require VIS3 mode or higher for the instructions they use.
if (UseVIS > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
// we disable both the AES flags if either of them is disabled on the command line
if (!UseAES || !UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAES, false);
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
} else {
if (UseAES || UseAESIntrinsics) {
warning("SPARC AES intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
if (UseAES) {
FLAG_SET_DEFAULT(UseAES, false);
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("SPARC AES intrinsics require VIS3 instructions. Intrinsics will be disabled.");
}
if (UseAESIntrinsics) {
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
}
} else if (UseAES || UseAESIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
if (UseAES) {
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
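For reference, a minimal Java program that exercises the path these flags govern; a sketch using only the standard JCE API (class name and iteration count are illustrative). Running it with -XX:+UseAES -XX:+UseAESIntrinsics, or with -XX:-UseAES -XX:+UseAESIntrinsics to provoke the new warning, makes the flag interaction above observable.

import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

// Warms up an AES encryption loop so that, when UseAES/UseAESIntrinsics are
// enabled on an AES-capable CPU, the compiled code hits the AES stubs.
public class AesSmokeTest {
    public static void main(String[] args) throws Exception {
        byte[] key = new byte[16]; // all-zero 128-bit key, demo only
        Cipher c = Cipher.getInstance("AES/ECB/NoPadding");
        c.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"));
        byte[] block = new byte[16];
        for (int i = 0; i < 20_000; i++) { // enough iterations for C2 to compile
            block = c.doFinal(block);      // NoPadding: 16-byte blocks in and out
        }
        System.out.printf("final block starts with 0x%02x%n", block[0]);
    }
}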

src/cpu/x86/vm/vm_version_x86.cpp

@@ -632,13 +632,37 @@ void VM_Version::get_processor_features() {
// Use AES instructions if available.
if (supports_aes()) {
if (FLAG_IS_DEFAULT(UseAES)) {
UseAES = true;
FLAG_SET_DEFAULT(UseAES, true);
}
} else if (UseAES) {
if (!FLAG_IS_DEFAULT(UseAES))
if (!UseAES) {
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
} else {
if (UseSSE > 2) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
FLAG_SET_DEFAULT(UseAESIntrinsics, true);
}
} else {
// The AES intrinsic stubs require AES instruction support (of course)
// but also require SSE3 mode or higher for the instructions they use.
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
} else if (UseAES || UseAESIntrinsics) {
if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}
// Use CLMUL instructions if available.
if (supports_clmul()) {
@@ -673,18 +697,6 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
// The AES intrinsic stubs require AES instruction support (of course)
// but also require SSE3 mode for the instructions they use.
if (UseAES && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
UseAESIntrinsics = true;
}
} else if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
// GHASH/GCM intrinsics
if (UseCLMUL && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
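Both the SPARC and the x86 rewrite warn only when a flag that has to be overridden was explicitly set by the user. A plain-Java transliteration of the new x86 decision table, for illustration only (assumption: null models FLAG_IS_DEFAULT, i.e. the flag was left unset):

// Models the new UseAES/UseAESIntrinsics validation; not VM code.
public class AesFlagModel {
    static void warn(String msg) { System.err.println("warning: " + msg); }

    // cpuAes and sse describe the CPU; userAes/userIntr are command-line
    // settings, null meaning the flag was left at its default.
    static boolean[] validate(boolean cpuAes, int sse, Boolean userAes, Boolean userIntr) {
        boolean aes  = (userAes  != null) ? userAes  : cpuAes; // default follows CPU
        boolean intr = (userIntr != null) ? userIntr : false;
        if (cpuAes) {
            if (!aes) {
                if (intr && userIntr != null)
                    warn("AES intrinsics require UseAES flag to be enabled.");
                intr = false;
            } else if (sse > 2) {
                if (userIntr == null) intr = true; // on by default with SSE3+
            } else {
                if (intr && userIntr != null)
                    warn("X86 AES intrinsics require SSE3 instructions or higher.");
                intr = false;
            }
        } else if (aes || intr) {
            if (aes && userAes != null) warn("AES instructions are not available on this CPU");
            if (intr && userIntr != null) warn("AES intrinsics are not available on this CPU");
            aes = false;
            intr = false;
        }
        return new boolean[] { aes, intr };
    }

    public static void main(String[] args) {
        // -XX:-UseAES -XX:+UseAESIntrinsics on an AES-capable CPU:
        boolean[] r = validate(true, 4, false, true); // warns, disables intrinsics
        System.out.println("UseAES=" + r[0] + ", UseAESIntrinsics=" + r[1]);
    }
}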

src/share/vm/classfile/vmSymbols.hpp

@@ -109,6 +109,7 @@
template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \
template(java_io_Serializable, "java/io/Serializable") \
template(java_util_Arrays, "java/util/Arrays") \
template(java_util_Objects, "java/util/Objects") \
template(java_util_Properties, "java/util/Properties") \
template(java_util_Vector, "java/util/Vector") \
template(java_util_AbstractList, "java/util/AbstractList") \
@@ -883,6 +884,9 @@
do_intrinsic(_equalsL, java_lang_StringLatin1,equals_name, equalsB_signature, F_S) \
do_intrinsic(_equalsU, java_lang_StringUTF16, equals_name, equalsB_signature, F_S) \
\
do_intrinsic(_Objects_checkIndex, java_util_Objects, checkIndex_name, Objects_checkIndex_signature, F_S) \
do_signature(Objects_checkIndex_signature, "(IILjava/util/function/BiFunction;)I") \
\
do_class(java_nio_Buffer, "java/nio/Buffer") \
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \
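The descriptor registered above is the three-argument overload taking a BiFunction exception factory; the plain two-argument java.util.Objects.checkIndex(int, int) added in JDK 9 funnels into it. Typical use, for illustration:

import java.util.Objects;

// Bounds-checks an index with Objects.checkIndex; once intrinsified, the
// check becomes a RangeCheckNode that C2 can hoist or eliminate like an
// ordinary array bounds check.
public class CheckIndexDemo {
    public static void main(String[] args) {
        int[] data = new int[8];
        int i = Objects.checkIndex(3, data.length); // in range, returns 3
        System.out.println(data[i]);
        try {
            Objects.checkIndex(8, data.length);     // 8 >= length, throws
        } catch (IndexOutOfBoundsException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}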

src/share/vm/code/codeCache.cpp

@@ -140,7 +140,40 @@ nmethod* CodeCache::_scavenge_root_nmethods = NULL;
// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
// Prepare error message
const char* error = "Invalid code heap sizes";
err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
if (total_size > cache_size) {
// Some code heap sizes were explicitly set: total_size must be <= cache_size
message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
vm_exit_during_initialization(error, message);
} else if (all_set && total_size != cache_size) {
// All code heap sizes were explicitly set: total_size must equal cache_size
message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
vm_exit_during_initialization(error, message);
}
}
void CodeCache::initialize_heaps() {
bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
size_t min_size = os::vm_page_size();
size_t cache_size = ReservedCodeCacheSize;
size_t non_nmethod_size = NonNMethodCodeHeapSize;
size_t profiled_size = ProfiledCodeHeapSize;
size_t non_profiled_size = NonProfiledCodeHeapSize;
// Check if total size set via command line flags exceeds the reserved size
check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
(profiled_set ? profiled_size : min_size),
(non_profiled_set ? non_profiled_size : min_size),
cache_size,
non_nmethod_set && profiled_set && non_profiled_set);
// Determine size of compiler buffers
size_t code_buffers_size = 0;
#ifdef COMPILER1
@@ -155,51 +188,94 @@ void CodeCache::initialize_heaps() {
code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif
// Increase default non_nmethod_size to account for compiler buffers
if (!non_nmethod_set) {
non_nmethod_size += code_buffers_size;
}
// Calculate default CodeHeap sizes if not set by user
if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
&& !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
// Increase default NonNMethodCodeHeapSize to account for compiler buffers
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);
if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
// Check if we have enough space for the non-nmethod code heap
if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
// Use the default value for NonNMethodCodeHeapSize and one half of the
// remaining size for non-profiled methods and one half for profiled methods
size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
size_t profiled_size = remaining_size / 2;
size_t non_profiled_size = remaining_size - profiled_size;
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
if (cache_size > non_nmethod_size) {
// Use the default value for non_nmethod_size and one half of the
// remaining size for non-profiled and one half for profiled methods
size_t remaining_size = cache_size - non_nmethod_size;
profiled_size = remaining_size / 2;
non_profiled_size = remaining_size - profiled_size;
} else {
// Use all space for the non-nmethod heap and set other heaps to minimal size
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
non_nmethod_size = cache_size - 2 * min_size;
profiled_size = min_size;
non_profiled_size = min_size;
}
} else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
// The user explicitly set some code heap sizes. Increase or decrease the (default)
// sizes of the other code heaps accordingly. First adapt non-profiled and profiled
// code heap sizes and then only change non-nmethod code heap size if still necessary.
intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
if (non_profiled_set) {
if (!profiled_set) {
// Adapt size of profiled code heap
if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += profiled_size - min_size;
profiled_size = min_size;
} else {
profiled_size += diff_size;
diff_size = 0;
}
}
} else if (profiled_set) {
// Adapt size of non-profiled code heap
if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += non_profiled_size - min_size;
non_profiled_size = min_size;
} else {
non_profiled_size += diff_size;
diff_size = 0;
}
} else if (non_nmethod_set) {
// Distribute remaining size between profiled and non-profiled code heaps
diff_size = cache_size - non_nmethod_size;
profiled_size = diff_size / 2;
non_profiled_size = diff_size - profiled_size;
diff_size = 0;
}
if (diff_size != 0) {
// Use non-nmethod code heap for remaining space requirements
assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
non_nmethod_size += diff_size;
}
}
// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
if(!heap_available(CodeBlobType::MethodProfiled)) {
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
if(!heap_available(CodeBlobType::MethodNonProfiled)) {
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization(err_msg(
"Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
}
guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
// Verify sizes and update flag values
assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
// Align CodeHeaps
size_t alignment = heap_alignment();
size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
size_t profiled_size = align_size_down(ProfiledCodeHeapSize, alignment);
non_nmethod_size = align_size_up(non_nmethod_size, alignment);
profiled_size = align_size_down(profiled_size, alignment);
// Reserve one contiguous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
@@ -208,9 +284,9 @@ void CodeCache::initialize_heaps() {
// Profiled nmethods
// Non-nmethods
// ---------- low ------------
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
ReservedSpace non_method_space = rs.first_part(non_method_size);
ReservedSpace rest = rs.last_part(non_method_size);
ReservedCodeSpace rs = reserve_heap_memory(cache_size);
ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
ReservedSpace rest = rs.last_part(non_nmethod_size);
ReservedSpace profiled_space = rest.first_part(profiled_size);
ReservedSpace non_profiled_space = rest.last_part(profiled_size);
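As a worked example of the default split (a sketch, not VM code; the non-nmethod figure is an assumption, since it depends on the number of compiler threads):

// Default case above: no code heap size flags on the command line,
// -XX:ReservedCodeCacheSize=240m.
public class CodeHeapSplit {
    public static void main(String[] args) {
        final long K = 1024, M = K * K;
        long cacheSize       = 240 * M;         // ReservedCodeCacheSize
        long nonNMethodSize  = 5 * M + 716 * K; // assumed default + compiler buffers
        long remaining       = cacheSize - nonNMethodSize;
        long profiledSize    = remaining / 2;            // half for profiled code
        long nonProfiledSize = remaining - profiledSize; // rest for non-profiled
        System.out.printf("non-nmethod %dK, profiled %dK, non-profiled %dK%n",
                nonNMethodSize / K, profiledSize / K, nonProfiledSize / K);
        // Invariant checked by the assert in the hunk above:
        assert nonNMethodSize + profiledSize + nonProfiledSize == cacheSize;
    }
}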

src/share/vm/code/codeCache.hpp

@@ -94,6 +94,8 @@ class CodeCache : AllStatic {
// CodeHeap management
static void initialize_heaps(); // Initializes the CodeHeaps
// Check the code heap sizes set by the user via command line
static void check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set);
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
static void add_heap(ReservedSpace rs, const char* name, int code_blob_type);
static CodeHeap* get_code_heap(const CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob

src/share/vm/opto/c2compiler.cpp

@@ -451,6 +451,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_updateByteBufferAdler32:
case vmIntrinsics::_profileBoolean:
case vmIntrinsics::_isCompileConstant:
case vmIntrinsics::_Objects_checkIndex:
break;
default:
return false;

src/share/vm/opto/ifnode.cpp

@@ -485,7 +485,7 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
return NULL;
}
if (l->is_top()) return NULL; // Top input means dead test
if (r->Opcode() != Op_LoadRange) return NULL;
if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return NULL;
// We have recognized one of these forms:
// Flip 1: If (Bool[<] CmpU(l, LoadRange)) ...
@@ -525,9 +525,9 @@ int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
return 0;
} else if (l->Opcode() == Op_AddI) {
if ((off = l->in(1)->find_int_con(0)) != 0) {
ind = l->in(2);
ind = l->in(2)->uncast();
} else if ((off = l->in(2)->find_int_con(0)) != 0) {
ind = l->in(1);
ind = l->in(1)->uncast();
}
} else if ((off = l->find_int_con(-1)) >= 0) {
// constant offset with no variable index
@@ -806,7 +806,11 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod
// that the call stacks are equal for both JVMStates.
JVMState* dom_caller = dom_unc->jvms()->caller();
JVMState* caller = unc->jvms()->caller();
if (!dom_caller->same_calls_as(caller)) {
if ((dom_caller == NULL) != (caller == NULL)) {
// The current method must either be inlined into both dom_caller and
// caller or must not be inlined at all (top method). Bail out otherwise.
return false;
} else if (dom_caller != NULL && !dom_caller->same_calls_as(caller)) {
return false;
}
// Check that the bci of the dominating uncommon trap dominates the bci
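The uncast() calls matter because the new checkIndex intrinsic pins its result behind a CastII node; without stripping the cast, index expressions with constant offsets would no longer share a recognizable base index. A shape the recognition is meant to match (illustrative Java; the method name is made up):

// After checkIndex, j carries a CastII; a[j + 1] and a[j + 2] are AddI
// patterns whose variable input must be uncast to find the common index.
public class RangeCheckShape {
    static int sum3(int[] a, int i) {
        int j = java.util.Objects.checkIndex(i, a.length - 2);
        return a[j] + a[j + 1] + a[j + 2];
    }
    public static void main(String[] args) {
        System.out.println(sum3(new int[] {1, 2, 3, 4, 5}, 1)); // 2 + 3 + 4 = 9
    }
}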

src/share/vm/opto/library_call.cpp

@@ -256,6 +256,7 @@ class LibraryCallKit : public GraphKit {
bool inline_native_getLength();
bool inline_array_copyOf(bool is_copyOfRange);
bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
bool inline_objects_checkIndex();
void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
bool inline_native_clone(bool is_virtual);
bool inline_native_Reflection_getCallerClass();
@@ -647,6 +648,7 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
case vmIntrinsics::_Objects_checkIndex: return inline_objects_checkIndex();
case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
@@ -1045,6 +1047,54 @@ bool LibraryCallKit::inline_hasNegatives() {
return true;
}
bool LibraryCallKit::inline_objects_checkIndex() {
Node* index = argument(0);
Node* length = argument(1);
if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
return false;
}
Node* len_pos_cmp = _gvn.transform(new CmpINode(length, intcon(0)));
Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
{
BuildCutout unless(this, len_pos_bol, PROB_MAX);
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
}
if (stopped()) {
return false;
}
Node* rc_cmp = _gvn.transform(new CmpUNode(index, length));
BoolTest::mask btest = BoolTest::lt;
Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
_gvn.set_type(rc, rc->Value(&_gvn));
if (!rc_bool->is_Con()) {
record_for_igvn(rc);
}
set_control(_gvn.transform(new IfTrueNode(rc)));
{
PreserveJVMState pjvms(this);
set_control(_gvn.transform(new IfFalseNode(rc)));
uncommon_trap(Deoptimization::Reason_range_check,
Deoptimization::Action_make_not_entrant);
}
if (stopped()) {
return false;
}
Node* result = new CastIINode(index, TypeInt::make(0, _gvn.type(length)->is_int()->_hi, Type::WidenMax));
result->set_req(0, control());
result = _gvn.transform(result);
set_result(result);
replace_in_map(index, result);
return true;
}
//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
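In Java terms, the intrinsic's two guarded traps behave like the following paraphrase (a hedged sketch of the semantics, not of the emitted IR; on a real deoptimization the interpreter re-executes the library code and throws the actual exception):

// Semantics of the generated checks: a negative length fails the
// Reason_intrinsic guard, an out-of-range index fails the range check.
public class CheckIndexSemantics {
    static int checkIndex(int index, int length) {
        if (length < 0)
            throw new IllegalArgumentException("negative length");  // first trap
        if (Integer.compareUnsigned(index, length) >= 0)            // CmpU in the IR
            throw new IndexOutOfBoundsException("index " + index);  // range-check trap
        return index; // CastII narrows the result type to [0, length - 1]
    }
    public static void main(String[] args) {
        System.out.println(checkIndex(2, 10)); // prints 2
    }
}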

src/share/vm/opto/loopPredicate.cpp

@@ -569,7 +569,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari
return false;
}
Node* range = cmp->in(2);
if (range->Opcode() != Op_LoadRange) {
if (range->Opcode() != Op_LoadRange && !iff->is_RangeCheck()) {
const TypeInt* tint = phase->_igvn.type(range)->isa_int();
if (tint == NULL || tint->empty() || tint->_lo < 0) {
// Allow predication on positive values that aren't LoadRanges.

src/share/vm/opto/loopnode.cpp

@@ -329,6 +329,9 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
Node* phi_incr = NULL;
// Trip-counter increment must be commutative & associative.
if (incr->Opcode() == Op_CastII) {
incr = incr->in(1);
}
if (incr->is_Phi()) {
if (incr->as_Phi()->region() != x || incr->req() != 3)
return false; // Not simple trip counter expression
@@ -356,6 +359,9 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
xphi = stride;
stride = tmp;
}
if (xphi->Opcode() == Op_CastII) {
xphi = xphi->in(1);
}
// Stride must be constant
int stride_con = stride->get_int();
if (stride_con == 0)
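Unwrapping the CastII keeps loops countable when the trip counter has passed through checkIndex: replace_in_map in the library_call.cpp hunk above rewires later uses of the index, including the increment, to the cast node. A loop shape that motivates the change, for illustration:

// Using checkIndex inside the body makes i++ consume a CastII of the loop
// phi; without the unwrapping above, the loop would no longer be
// recognized as a counted loop.
public class CountedLoopShape {
    static long sum(int[] a) {
        long s = 0;
        for (int i = 0; i < a.length; i++) {
            s += a[java.util.Objects.checkIndex(i, a.length)];
        }
        return s;
    }
    public static void main(String[] args) {
        System.out.println(sum(new int[] {1, 2, 3})); // prints 6
    }
}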

src/share/vm/opto/parse1.cpp

@@ -964,11 +964,12 @@ void Parse::do_exits() {
}
}
// Any method can write a @Stable field; insert memory barriers after
// those also. If there is a predecessor allocation node, bind the
// barrier there.
// Any method can write a @Stable field; insert memory barriers
// after those also. Can't bind predecessor allocation node (if any)
// with barrier because allocation doesn't always dominate
// MemBarRelease.
if (wrote_stable()) {
_exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
_exits.insert_mem_bar(Op_MemBarRelease);
if (PrintOpto && (Verbose || WizardMode)) {
method()->print_name();
tty->print_cr(" writes @Stable and needs a memory barrier");

src/share/vm/opto/parse3.cpp

@@ -311,9 +311,8 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor.
if (C->eliminate_boxing() &&
adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
// Can't bind stable with its allocation, only record allocation for final field.
if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
set_alloc_with_final(obj);
}
}

src/share/vm/runtime/arguments.cpp

@@ -1466,24 +1466,6 @@ void Arguments::set_tiered_flags() {
// Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
// Multiply sizes by 5 but fix NonNMethodCodeHeapSize (distribute among non-profiled and profiled code heap)
if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, ProfiledCodeHeapSize * 5 + NonNMethodCodeHeapSize * 2);
}
if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize * 5 + NonNMethodCodeHeapSize * 2);
}
// Check consistency of code heap sizes
if ((NonNMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid code heap sizes: NonNMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
NonNMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
(NonNMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
vm_exit(1);
}
}
}
if (!UseInterpreter) { // -Xcomp
Tier3InvokeNotifyFreqLog = 0;
@@ -2533,18 +2515,11 @@ bool Arguments::check_vm_args_consistency() {
"Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
CODE_CACHE_SIZE_LIMIT/M);
status = false;
} else if (NonNMethodCodeHeapSize < min_code_cache_size){
} else if (NonNMethodCodeHeapSize < min_code_cache_size) {
jio_fprintf(defaultStream::error_stream(),
"Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
min_code_cache_size/K);
status = false;
} else if ((!FLAG_IS_DEFAULT(NonNMethodCodeHeapSize) || !FLAG_IS_DEFAULT(ProfiledCodeHeapSize) || !FLAG_IS_DEFAULT(NonProfiledCodeHeapSize))
&& (NonNMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid code heap sizes: NonNMethodCodeHeapSize(%dK) + ProfiledCodeHeapSize(%dK) + NonProfiledCodeHeapSize(%dK) = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n",
NonNMethodCodeHeapSize/K, ProfiledCodeHeapSize/K, NonProfiledCodeHeapSize/K,
(NonNMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K);
status = false;
}
if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {

src/share/vm/runtime/globals.hpp

@@ -3561,7 +3561,7 @@ public:
\
product_pd(intx, CompilerThreadStackSize, \
"Compiler Thread Stack Size (in Kbytes)") \
range(0, max_intx) \
range(0, max_intx /(1 * K)) \
\
develop_pd(size_t, JVMInvokeMethodSlack, \
"Stack space (bytes) required for JVM_InvokeMethod to complete") \

test/compiler/stable/TestStableMemoryBarrier.java

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestStableMemoryBarrier
* @bug 8139758
* @summary tests that memory barriers are correctly inserted for stable fields
* @library /testlibrary /../../test/lib
*
* @run main/bootclasspath -Xcomp -XX:CompileOnly=::testCompile
* java.lang.invoke.TestStableMemoryBarrier
*
* @author hui.shi@linaro.org
*/
package java.lang.invoke;
import java.lang.reflect.InvocationTargetException;
public class TestStableMemoryBarrier {
public static void main(String[] args) throws Exception {
run(NotDominate.class);
}
/* ====================================================
* Stable field initialized in a method, but its allocation
* doesn't dominate the MemBarRelease at the end of the method.
*/
static class NotDominate {
public @Stable int v;
public static int[] array = new int[100];
public static NotDominate testCompile(int n) {
if ((n % 2) == 0) return null;
// add a loop here to trigger PhaseIdealLoop::verify_dominance
for (int i = 0; i < 100; i++) {
array[i] = n;
}
NotDominate nm = new NotDominate();
nm.v = n;
return nm;
}
public static void test() throws Exception {
for (int i = 0; i < 1000000; i++)
testCompile(i);
}
}
public static void run(Class<?> test) {
Throwable ex = null;
System.out.print(test.getName()+": ");
try {
test.getMethod("test").invoke(null);
} catch (InvocationTargetException e) {
ex = e.getCause();
} catch (Throwable e) {
ex = e;
} finally {
if (ex == null) {
System.out.println("PASSED");
} else {
System.out.println("FAILED");
ex.printStackTrace(System.out);
}
}
}
}

test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java

@@ -58,6 +58,13 @@ public class TestOptionsWithRanges {
*/
allOptionsAsMap.remove("ThreadStackSize");
/*
* Remove the flag controlling the compiler thread stack size because
* the flag has a direct influence on the physical memory usage of
* the VM.
*/
allOptionsAsMap.remove("CompilerThreadStackSize");
/*
* Exclude MallocMaxTestWords as it is expected to exit VM at small values (>=0)
*/