Alejandro Murillo 2016-06-10 15:13:37 -07:00
commit 00815d4c03
33 changed files with 716 additions and 253 deletions

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1530,6 +1530,10 @@ class Assembler : public AbstractAssembler {
   inline void ld(  Register d, int si16, Register s1);
   inline void ldu( Register d, int si16, Register s1);
 
+  // For convenience. Load pointer into d from b+s1.
+  inline void ld_ptr(Register d, int b, Register s1);
+  DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
+
   // PPC 1, section 3.3.3 Fixed-Point Store Instructions
   inline void stwx( Register d, Register s1, Register s2);
   inline void stw(  Register d, int si16, Register s1);
@@ -2194,7 +2198,8 @@ class Assembler : public AbstractAssembler {
   void add( Register d, RegisterOrConstant roc, Register s1);
   void subf(Register d, RegisterOrConstant roc, Register s1);
   void cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1);
+  // Load pointer d from s1+roc.
+  void ld_ptr(Register d, RegisterOrConstant roc, Register s1 = noreg) { ld(d, roc, s1); }
 
   // Emit several instructions to load a 64 bit constant. This issues a fixed
   // instruction pattern so that the constant can be patched later on.

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -328,6 +328,9 @@ inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(
 inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
+DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
 
 // PPC 1, section 3.3.3 Fixed-Point Store Instructions
 inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1242,7 +1242,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 
 void LIR_Assembler::return_op(LIR_Opr result) {
-  const Register return_pc    = R11;
+  const Register return_pc    = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page = R12;
 
   // Pop the stack before the safepoint code.
@@ -1265,6 +1265,10 @@ void LIR_Assembler::return_op(LIR_Opr result) {
   // Move return pc to LR.
   __ mtlr(return_pc);
 
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }
+
   // We need to mark the code position where the load from the safepoint
   // polling page was emitted as relocInfo::poll_return_type here.
   __ relocate(relocInfo::poll_return_type);

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,4 +52,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 #define INCLUDE_RTM_OPT 1
 #endif
 
+#define SUPPORT_RESERVED_STACK_AREA
+
 #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP

View file

@@ -43,7 +43,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs pas
 
 #define DEFAULT_STACK_YELLOW_PAGES    (6)
 #define DEFAULT_STACK_RED_PAGES       (1)
 #define DEFAULT_STACK_SHADOW_PAGES    (6 DEBUG_ONLY(+2))
-#define DEFAULT_STACK_RESERVED_PAGES  (0)
+#define DEFAULT_STACK_RESERVED_PAGES  (1)
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES    DEFAULT_STACK_RED_PAGES

View file

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -480,6 +480,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register
 void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
   Label done;
+  BLOCK_COMMENT("stack_overflow_check_with_compare_and_throw {");
   sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
   ld(Rscratch1, thread_(stack_overflow_limit));
   cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
@@ -501,6 +502,7 @@ void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_t
   align(32, 12);
   bind(done);
+  BLOCK_COMMENT("} stack_overflow_check_with_compare_and_throw");
 }
 
 // Separate these two to allow for delay slot in middle.
@@ -805,16 +807,41 @@ void InterpreterMacroAssembler::narrow(Register result) {
 void InterpreterMacroAssembler::remove_activation(TosState state,
                                                   bool throw_monitor_exception,
                                                   bool install_monitor_exception) {
+  BLOCK_COMMENT("remove_activation {");
   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
 
   // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
   notify_method_exit(false, state, NotifyJVMTI, true);
 
+  BLOCK_COMMENT("reserved_stack_check:");
+  if (StackReservedPages > 0) {
+    // Test if reserved zone needs to be enabled.
+    Label no_reserved_zone_enabling;
+
+    // Compare frame pointers. There is no good stack pointer, as with stack
+    // frame compression we can get different SPs when we do calls. A subsequent
+    // call could have a smaller SP, so that this compare succeeds for an
+    // inner call of the method annotated with ReservedStack.
+    ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+    ld_ptr(R11_scratch1, _abi(callers_sp), R1_SP);  // Load frame pointer.
+    cmpld(CCR0, R11_scratch1, R0);
+    blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+    // Enable reserved zone again, throw stack overflow exception.
+    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));
+
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
+
   verify_oop(R17_tos, state);
   verify_thread();
 
   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
   mtlr(R0);
+  BLOCK_COMMENT("} remove_activation");
 }
 
 // Lock object

View file

@@ -1400,6 +1400,28 @@ address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext)
 #endif
 }
 
+void MacroAssembler::reserved_stack_check(Register return_pc) {
+  // Test if reserved zone needs to be enabled.
+  Label no_reserved_zone_enabling;
+
+  ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+  cmpld(CCR0, R1_SP, R0);
+  blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+  // Enable reserved zone again, throw stack overflow exception.
+  push_frame_reg_args(0, R0);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+  pop_frame();
+  mtlr(return_pc);
+  load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry());
+  mtctr(R0);
+  bctr();
+
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 // CmpxchgX sets condition register to cmpX(current, compare).
 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
                               Register compare_value, Register exchange_value,

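Conceptually, reserved_stack_check and the epilogue hooks that call it reduce to a watermark comparison: the thread records the activation of the method annotated for reserved-stack access, and the reserved zone is re-protected only once the stack has unwound back to that activation. The following is a hedged C++ model only, not HotSpot code; the struct, field, and function names are ours, and the comparison direction is read off the cmpld/blt_predict_taken pair above:

    #include <cstdint>

    // Hypothetical model of the watermark test emitted by reserved_stack_check.
    struct ThreadModel {
      // Recorded when the reserved zone is first used; compared against SP
      // on every exit from a method compiled with reserved-stack access.
      uintptr_t reserved_stack_activation;
    };

    // The stack grows downward: while SP is still below the watermark we are
    // in a callee of the annotated method, so the reserved zone stays usable.
    // Once SP reaches the watermark, re-arm the zone and deliver the delayed
    // StackOverflowError.
    bool should_reenable_reserved_zone(const ThreadModel& t, uintptr_t sp) {
      return sp >= t.reserved_stack_activation;  // cmpld + blt in the diff
    }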
View file

@@ -411,6 +411,10 @@ class MacroAssembler: public Assembler {
   // stdux, return the banged address. Otherwise, return 0.
   static address get_stack_bang_address(int instruction, void* ucontext);
 
+  // Check for reserved stack access in method being exited. If the reserved
+  // stack area was accessed, protect it again and throw StackOverflowError.
+  void reserved_stack_check(Register return_pc);
+
   // Atomics
   // CmpxchgX sets condition register to cmpX(current, compare).
   // (flag == ne) => (dest_current_value != compare_value), (!swapped)

View file

@@ -1432,7 +1432,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   const bool method_needs_polling = do_polling() && C->is_method_compilation();
   const bool method_is_frameless  = false /* TODO: PPC port C->is_frameless_method()*/;
 
-  const Register return_pc    = R11;
+  const Register return_pc    = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page = R12;
 
   if (!method_is_frameless) {
@@ -1456,6 +1456,10 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
     __ addi(R1_SP, R1_SP, (int)framesize);
   }
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }
+
   if (method_needs_polling) {
     // We need to mark the code position where the load from the safepoint
     // polling page was emitted as relocInfo::poll_return_type here.

View file

@@ -3082,6 +3082,9 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_throw_StackOverflowError_entry =
       generate_throw_exception("StackOverflowError throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+      generate_throw_exception("delayed StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
 
     // CRC32 Intrinsics.
     if (UseCRC32Intrinsics) {

View file

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -128,6 +128,8 @@ class Aix {
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* uc, address pc);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_aix_signal, harmlessly.
   static bool signal_handlers_are_installed;

View file

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
  */
 
 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -145,6 +145,41 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
   return fr;
 }
 
+bool os::Aix::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Aix::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex code with compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* sp = os::Aix::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == NULL) {
     // fr is the last C frame
@@ -246,14 +281,32 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
         // to continue with yellow zone disabled, but that doesn't buy us much and prevents
         // hs_err_pid files.
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Aix::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)activation.fp());
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
             // Throw a stack overflow exception.
             // Guard pages will be reenabled while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
            goto run_stub;
          } else {
            // Thread was in the vm or native code. Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {

View file

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
  */
 
 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -157,6 +157,42 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
   return frame(sp, epc.pc());
 }
 
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex code with compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == 0) {
     // fr is the last C frame
@@ -243,13 +279,31 @@ JVM_handle_linux_signal(int sig,
     if (thread->on_local_stack(addr)) {
       // stack overflow
       if (thread->in_stack_yellow_reserved_zone(addr)) {
-        thread->disable_stack_yellow_reserved_zone();
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation =
+                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)activation.fp());
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return 1;
+              }
+            }
+          }
           // Throw a stack overflow exception.
          // Guard pages will be reenabled while unwinding the stack.
+          thread->disable_stack_yellow_reserved_zone();
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code. Return and try to finish.
+          thread->disable_stack_yellow_reserved_zone();
          return 1;
        }
      } else if (thread->in_stack_red_zone(addr)) {

View file

@@ -67,6 +67,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
@@ -76,6 +77,11 @@
 #include "trace/tracing.hpp"
 #endif
 
+// helper function to avoid in-line casts
+template <typename T> static T* load_ptr_acquire(T* volatile *p) {
+  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
+}
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -147,20 +153,23 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
+  assert_locked_or_safepoint(_metaspace_lock);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     f(k);
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -179,7 +188,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -188,6 +198,7 @@ void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 }
 
 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);
@@ -200,9 +211,11 @@ void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 }
 
 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  if (_packages != NULL) {
-    for (int i = 0; i < _packages->table_size(); i++) {
-      for (PackageEntry* entry = _packages->bucket(i);
+  // Lock-free access requires load_ptr_acquire
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages != NULL) {
+    for (int i = 0; i < packages->table_size(); i++) {
+      for (PackageEntry* entry = packages->bucket(i);
            entry != NULL;
            entry = entry->next()) {
         f(entry);
@@ -325,10 +338,9 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
-    // Make sure linked class is stable, since the class list is walked without a lock
-    OrderAccess::storestore();
-    // link the new item into the list
-    _klasses = k;
+    // Link the new item into the list, making sure the linked class is stable
+    // since the list can be walked without a lock
+    OrderAccess::release_store_ptr(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -343,11 +355,10 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
   }
 }
 
-// This is called by InstanceKlass::deallocate_contents() to remove the
-// scratch_class for redefine classes. We need a lock because there it may not
-// be called at a safepoint if there's an error.
+// Remove a klass from the _klasses list for scratch_class during redefinition
+// or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k == scratch_class) {
@@ -390,42 +401,46 @@ void ClassLoaderData::unload() {
 
 PackageEntryTable* ClassLoaderData::packages() {
   // Lazily create the package entry table at first request.
-  if (_packages == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages == NULL) {
     MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     // Check if _packages got allocated while we were waiting for this lock.
-    if (_packages == NULL) {
-      _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+    if ((packages = _packages) == NULL) {
+      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+      // Ensure _packages is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_packages, packages);
     }
   }
-  return _packages;
+  return packages;
 }
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  if (_modules == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  if (modules == NULL) {
     MutexLocker m1(Module_lock);
-    // Check again if _modules has been allocated while we were getting this lock.
-    if (_modules != NULL) {
-      return _modules;
-    }
-
-    ModuleEntryTable* temp_table = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
-    // Each loader has one unnamed module entry. Create it before
-    // any classes, loaded by this loader, are defined in case
-    // they end up being defined in loader's unnamed module.
-    temp_table->create_unnamed_module(this);
-
-    {
-      MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-      // Ensure _modules is stable, since it is examined without a lock
-      OrderAccess::storestore();
-      _modules = temp_table;
+    // Check if _modules got allocated while we were waiting for this lock.
+    if ((modules = _modules) == NULL) {
+      modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
+      // Each loader has one unnamed module entry. Create it before
+      // any classes, loaded by this loader, are defined in case
+      // they end up being defined in loader's unnamed module.
+      modules->create_unnamed_module(this);
+
+      {
+        MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+        // Ensure _modules is stable, since it is examined without a lock
+        OrderAccess::release_store_ptr(&_modules, modules);
+      }
     }
   }
-  return _modules;
+  return modules;
 }
 
 oop ClassLoaderData::keep_alive_object() const {
+  assert_locked_or_safepoint(_metaspace_lock);
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
 }
@@ -499,30 +514,33 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  if (_metaspace == NULL) {
-    MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-    // Check again if metaspace has been allocated while we were getting this lock.
-    if (_metaspace != NULL) {
-      return _metaspace;
-    }
-    if (this == the_null_class_loader_data()) {
-      assert (class_loader() == NULL, "Must be");
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
-    } else if (is_anonymous()) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
-    } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
-    } else {
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType));
-    }
-  }
-  return _metaspace;
+  // Lock-free access requires load_ptr_acquire.
+  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  if (metaspace == NULL) {
+    MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
+    // Check if _metaspace got allocated while we were waiting for this lock.
+    if ((metaspace = _metaspace) == NULL) {
+      if (this == the_null_class_loader_data()) {
+        assert (class_loader() == NULL, "Must be");
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+      } else if (is_anonymous()) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+      } else {
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+      }
+      // Ensure _metaspace is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_metaspace, metaspace);
    }
  }
-  return _metaspace;
+  return metaspace;
 }
 
 JNIHandleBlock* ClassLoaderData::handles() const { return _handles; }
@@ -638,6 +656,7 @@ void ClassLoaderData::dump(outputStream * const out) {
 #endif // PRODUCT
 
 void ClassLoaderData::verify() {
+  assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();
 
   guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
@@ -656,7 +675,8 @@ void ClassLoaderData::verify() {
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -1046,6 +1066,7 @@ ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic
   // Find the first klass in the CLDG.
   while (cld != NULL) {
+    assert_locked_or_safepoint(cld->metaspace_lock());
     klass = cld->_klasses;
     if (klass != NULL) {
       _next_klass = klass;
@@ -1063,6 +1084,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass)
   // No more klasses in the current CLD. Time to find a new CLD.
   ClassLoaderData* cld = klass->class_loader_data();
+  assert_locked_or_safepoint(cld->metaspace_lock());
   while (next == NULL) {
     cld = cld->next();
     if (cld == NULL) {

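Every lazily created field in this file (_packages, _modules, _metaspace, and the _klasses list head) now follows the same double-checked pattern: acquire-load the field, take the lock, re-check, then publish with a release store. Below is a minimal stand-alone sketch of that pattern using C++11 std::atomic in place of HotSpot's OrderAccess::load_ptr_acquire / release_store_ptr; treating those primitives as plain acquire/release equivalents is our assumption, though the names suggest exactly that:

    #include <atomic>
    #include <mutex>

    struct Table { /* expensive-to-build state */ };

    std::atomic<Table*> g_table{nullptr};
    std::mutex g_lock;

    Table* get_table() {
      // Lock-free fast path: the acquire load pairs with the release store
      // below, so a non-null pointer implies a fully constructed Table.
      Table* t = g_table.load(std::memory_order_acquire);
      if (t == nullptr) {
        std::lock_guard<std::mutex> guard(g_lock);
        // Re-check: another thread may have won the race while we waited.
        t = g_table.load(std::memory_order_relaxed);  // lock orders this read
        if (t == nullptr) {
          t = new Table();
          // Publish: make the Table's contents visible before the pointer.
          g_table.store(t, std::memory_order_release);
        }
      }
      return t;
    }

The acquire/release pairing is what makes the lock-free fast path safe: a reader that observes the pointer is guaranteed to also observe the initialized object behind it, which is exactly why the diff replaces the bare OrderAccess::storestore() plus plain assignment with a single release_store_ptr.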
View file

@@ -171,8 +171,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   Dependencies _dependencies; // holds dependencies from this class loader
                               // data to others.
 
-  Metaspace * _metaspace;           // Meta-space where meta-data defined by the
-                                    // classes in the class loader are allocated.
+  Metaspace * volatile _metaspace;  // Meta-space where meta-data defined by the
+                                    // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
   bool _is_anonymous;      // if this CLD is for an anonymous class
@@ -186,9 +186,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   JNIHandleBlock* _handles; // Handles to constant pool arrays, Modules, etc, which
                             // have the same life cycle of the corresponding ClassLoader.
 
-  Klass* _klasses;                       // The classes defined by the class loader.
-  PackageEntryTable* _packages;          // The packages defined by the class loader.
-  ModuleEntryTable* _modules;            // The modules defined by the class loader.
+  Klass* volatile _klasses;              // The classes defined by the class loader.
+  PackageEntryTable* volatile _packages; // The packages defined by the class loader.
+  ModuleEntryTable* volatile _modules;   // The modules defined by the class loader.
 
   // These method IDs are created for the class loader and set to NULL when the
   // class loader is unloaded. They are rarely freed, only for redefine classes
@@ -216,8 +216,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
-  void set_metaspace(Metaspace* m) { _metaspace = m; }
-
   JNIHandleBlock* handles() const;
   void set_handles(JNIHandleBlock* handles);

View file

@@ -34,15 +34,13 @@
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/ostream.hpp"
 
-// Return true if this package is exported to m.
+// Returns true if this package specifies m as a qualified export, including through an unnamed export
 bool PackageEntry::is_qexported_to(ModuleEntry* m) const {
   assert(m != NULL, "No module to lookup in this package's qualified exports list");
   MutexLocker m1(Module_lock);
-  if (!_is_exported) {
-    return false;
-  } else if (_is_exported_allUnnamed && !m->is_named()) {
+  if (is_exported_allUnnamed() && !m->is_named()) {
     return true;
-  } else if (_qualified_exports == NULL) {
+  } else if (!has_qual_exports_list()) {
     return false;
   } else {
     return _qualified_exports->contains(m);
@@ -52,8 +50,7 @@ bool PackageEntry::is_qexported_to(ModuleEntry* m) const {
 // Add a module to the package's qualified export list.
 void PackageEntry::add_qexport(ModuleEntry* m) {
   assert_locked_or_safepoint(Module_lock);
-  assert(_is_exported == true, "Adding a qualified export to a package that is not exported");
-  if (_qualified_exports == NULL) {
+  if (!has_qual_exports_list()) {
     // Lazily create a package's qualified exports list.
     // Initial size is small, do not anticipate export lists to be large.
     _qualified_exports =
@@ -62,7 +59,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
   _qualified_exports->append_if_missing(m);
 }
 
-// Set the package's exported state based on the value of the ModuleEntry.
+// Set the package's exported states based on the value of the ModuleEntry.
 void PackageEntry::set_exported(ModuleEntry* m) {
   MutexLocker m1(Module_lock);
   if (is_unqual_exported()) {
@@ -73,7 +70,7 @@ void PackageEntry::set_exported(ModuleEntry* m) {
 
   if (m == NULL) {
     // NULL indicates the package is being unqualifiedly exported
-    if (_is_exported && _qualified_exports != NULL) {
+    if (has_qual_exports_list()) {
       // Legit to transition a package from being qualifiedly exported
       // to unqualified. Clean up the qualified lists at the next
       // safepoint.
@@ -85,11 +82,17 @@ void PackageEntry::set_exported(ModuleEntry* m) {
   } else {
     // Add the exported module
-    _is_exported = true;
     add_qexport(m);
   }
 }
 
+void PackageEntry::set_is_exported_allUnnamed() {
+  MutexLocker m1(Module_lock);
+  if (!is_unqual_exported()) {
+    _is_exported_allUnnamed = true;
+  }
+}
+
 // Remove dead module entries within the package's exported list.
 void PackageEntry::purge_qualified_exports() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -170,7 +173,7 @@ PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, Modu
   if (!module->is_named()) {
     // Set the exported state to true because all packages
     // within the unnamed module are unqualifiedly exported
-    entry->set_exported(true);
+    entry->set_unqual_exported();
   }
   entry->set_module(module);
   return entry;
@@ -248,6 +251,20 @@ void PackageEntryTable::verify_javabase_packages(GrowableArray<Symbol*> *pkg_lis
 }
 
+// iteration of qualified exports
+void PackageEntry::package_exports_do(ModuleClosure* const f) {
+  assert_locked_or_safepoint(Module_lock);
+  assert(f != NULL, "invariant");
+
+  if (has_qual_exports_list()) {
+    int qe_len = _qualified_exports->length();
+
+    for (int i = 0; i < qe_len; ++i) {
+      f->do_module(_qualified_exports->at(i));
+    }
+  }
+}
+
 // Remove dead entries from all packages' exported list
 void PackageEntryTable::purge_all_package_exports() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -281,10 +298,10 @@ void PackageEntryTable::print(outputStream* st) {
 void PackageEntry::print(outputStream* st) {
   ResourceMark rm;
   st->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
-               INT32_FORMAT " is_exported %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
+               INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
               p2i(this), name()->as_C_string(),
               (module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
-               _classpath_index, _is_exported, _is_exported_allUnnamed, p2i(next()));
+               _classpath_index, _is_exported_unqualified, _is_exported_allUnnamed, p2i(next()));
 }
 
 void PackageEntryTable::verify() {
@@ -305,17 +322,3 @@ void PackageEntryTable::verify() {
 void PackageEntry::verify() {
   guarantee(name() != NULL, "A package entry must have a corresponding symbol name.");
 }
-
-// iteration of qualified exports
-void PackageEntry::package_exports_do(ModuleClosure* const f) {
-  assert_locked_or_safepoint(Module_lock);
-  assert(f != NULL, "invariant");
-
-  if (is_qual_exported()) {
-    int qe_len = _qualified_exports->length();
-
-    for (int i = 0; i < qe_len; ++i) {
-      f->do_module(_qualified_exports->at(i));
-    }
-  }
-}

View file

@@ -34,16 +34,32 @@
 // A PackageEntry basically represents a Java package. It contains:
 //   - Symbol* containing the package's name.
 //   - ModuleEntry* for this package's containing module.
-//   - a flag indicating if package is exported, either qualifiedly or
-//     unqualifiedly.
+//   - a flag indicating if package is exported unqualifiedly
 //   - a flag indicating if this package is exported to all unnamed modules.
 //   - a growable array containing other module entries that this
 //     package is exported to.
 //
-// Packages that are:
-//   - not exported:        _qualified_exports = NULL && _is_exported is false
-//   - qualified exports:   (_qualified_exports != NULL || _is_exported_allUnnamed is true) && _is_exported is true
-//   - unqualified exports: (_qualified_exports = NULL && _is_exported_allUnnamed is false) && _is_exported is true
+// Packages can be exported in the following 3 ways:
+//   - not exported:        the package has not been explicitly qualified to a
+//                          particular module nor has it been specified to be
+//                          unqualifiedly exported to all modules. If all states
+//                          of exportedness are false, the package is considered
+//                          not exported.
+//   - qualified exports:   the package has been explicitly qualified to at least
+//                          one particular module or has been qualifiedly exported
+//                          to all unnamed modules.
+//                          Note: _is_exported_allUnnamed is a form of a qualified
+//                          export. It is equivalent to the package being
+//                          explicitly exported to all current and future unnamed modules.
+//   - unqualified exports: the package is exported to all modules.
+//
+// A package can transition from:
+//   - being not exported, to being exported either in a qualified or unqualified manner
+//   - being qualifiedly exported, to unqualifiedly exported. Its exported scope is widened.
+//
+// A package cannot transition from:
+//   - being unqualifiedly exported, to exported qualifiedly to a specific module.
+//     This transition attempt is silently ignored in set_exported.
 //
 // The Mutex Module_lock is shared between ModuleEntry and PackageEntry, to lock either
 // data structure.
@@ -55,7 +71,7 @@ private:
   // loaded by the boot loader from -Xbootclasspath/a in an unnamed module, it
   // indicates from which class path entry.
   s2 _classpath_index;
-  bool _is_exported;
+  bool _is_exported_unqualified;
   bool _is_exported_allUnnamed;
   GrowableArray<ModuleEntry*>* _exported_pending_delete; // transitioned from qualified to unqualified, delete at safepoint
   GrowableArray<ModuleEntry*>* _qualified_exports;
@@ -68,7 +84,7 @@ public:
   void init() {
     _module = NULL;
     _classpath_index = -1;
-    _is_exported = false;
+    _is_exported_unqualified = false;
     _is_exported_allUnnamed = false;
     _exported_pending_delete = NULL;
     _qualified_exports = NULL;
@@ -83,34 +99,41 @@ public:
   void set_module(ModuleEntry* m) { _module = m; }
 
   // package's export state
-  bool is_exported() const { return _is_exported; } // qualifiedly or unqualifiedly exported
+  bool is_exported() const { // qualifiedly or unqualifiedly exported
+    return (is_unqual_exported() || has_qual_exports_list() || is_exported_allUnnamed());
+  }
+  // Returns true if the package has any explicit qualified exports or is exported to all unnamed
   bool is_qual_exported() const {
-    return (_is_exported && (_qualified_exports != NULL || _is_exported_allUnnamed));
+    return (has_qual_exports_list() || is_exported_allUnnamed());
+  }
+  // Returns true if there are any explicit qualified exports
+  bool has_qual_exports_list() const {
+    assert(!(_qualified_exports != NULL && _is_exported_unqualified),
+           "_qualified_exports set at same time as _is_exported_unqualified");
+    return (_qualified_exports != NULL);
+  }
+  bool is_exported_allUnnamed() const {
+    assert(!(_is_exported_allUnnamed && _is_exported_unqualified),
+           "_is_exported_allUnnamed set at same time as _is_exported_unqualified");
+    return _is_exported_allUnnamed;
   }
   bool is_unqual_exported() const {
-    return (_is_exported && (_qualified_exports == NULL && !_is_exported_allUnnamed));
+    assert(!(_qualified_exports != NULL && _is_exported_unqualified),
+           "_qualified_exports set at same time as _is_exported_unqualified");
+    assert(!(_is_exported_allUnnamed && _is_exported_unqualified),
+           "_is_exported_allUnnamed set at same time as _is_exported_unqualified");
+    return _is_exported_unqualified;
   }
   void set_unqual_exported() {
-    _is_exported = true;
+    _is_exported_unqualified = true;
     _is_exported_allUnnamed = false;
     _qualified_exports = NULL;
   }
   bool exported_pending_delete() const { return (_exported_pending_delete != NULL); }
-
-  void set_exported(bool e) { _is_exported = e; }
   void set_exported(ModuleEntry* m);
-
-  void set_is_exported_allUnnamed() {
-    if (!is_unqual_exported()) {
-      _is_exported_allUnnamed = true;
-      _is_exported = true;
-    }
-  }
-  bool is_exported_allUnnamed() const {
-    assert(_is_exported || !_is_exported_allUnnamed,
-           "is_allUnnamed set without is_exported being set");
-    return _is_exported_allUnnamed;
-  }
+  void set_is_exported_allUnnamed();
 
   void set_classpath_index(s2 classpath_index) {
     _classpath_index = classpath_index;
@@ -122,7 +145,7 @@ public:
   // returns true if the package is defined in the unnamed module
   bool in_unnamed_module() const { return !_module->is_named(); }
 
-  // returns true if the package specifies m as a qualified export
+  // returns true if the package specifies m as a qualified export, including through an unnamed export
   bool is_qexported_to(ModuleEntry* m) const;
 
   // add the module to the package's qualified exports

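The rewritten header comment above describes three mutually exclusive exportedness states and the transitions allowed between them. A compact model of that state machine may help; the enum and function names below are ours and purely illustrative, not HotSpot code:

    // The three states described in the header comment.
    enum class ExportState {
      NotExported,  // no qualified list, neither flag set
      Qualified,    // _qualified_exports != NULL or _is_exported_allUnnamed
      Unqualified   // _is_exported_unqualified
    };

    // Unqualified export always widens the scope (legal from any state).
    ExportState set_unqual_exported(ExportState) {
      return ExportState::Unqualified;
    }

    // Qualified export: NotExported -> Qualified is legal; a package that is
    // already unqualifiedly exported ignores the request, mirroring the
    // behavior of PackageEntry::set_exported and set_is_exported_allUnnamed.
    ExportState add_qualified_export(ExportState s) {
      if (s == ExportState::Unqualified) {
        return s;  // silently ignored; the scope cannot narrow
      }
      return ExportState::Qualified;
    }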
View file

@@ -966,20 +966,18 @@ void LinkResolver::resolve_static_call(CallInfo& result,
   methodHandle resolved_method = linktime_resolve_static_method(link_info, CHECK);
 
   // The resolved class can change as a result of this resolution.
-  KlassHandle resolved_klass = KlassHandle(THREAD, resolved_method->method_holder());
-
-  Method* save_resolved_method = resolved_method();
+  KlassHandle resolved_klass(THREAD, resolved_method->method_holder());
 
   // Initialize klass (this should only happen if everything is ok)
   if (initialize_class && resolved_klass->should_be_initialized()) {
     resolved_klass->initialize(CHECK);
-    // Use updated LinkInfo (to reresolve with resolved_klass as method_holder?)
+    // Use updated LinkInfo to reresolve with resolved method holder
     LinkInfo new_info(resolved_klass, link_info.name(), link_info.signature(),
                       link_info.current_klass(),
                       link_info.check_access() ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
     resolved_method = linktime_resolve_static_method(new_info, CHECK);
   }
 
-  assert(save_resolved_method == resolved_method(), "does this change?");
   // setup result
   result.set_static(resolved_klass, resolved_method, CHECK);
 }

View file

@@ -191,7 +191,12 @@ static void rewrite_nofast_bytecode(Method* method) {
     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
     case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
-    case Bytecodes::_iload:         *bcs.bcp() = Bytecodes::_nofast_iload;         break;
+    case Bytecodes::_iload: {
+      if (!bcs.is_wide()) {
+        *bcs.bcp() = Bytecodes::_nofast_iload;
+      }
+      break;
+    }
     default: break;
     }
   }
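The guard above matters because a wide-prefixed iload uses a two-byte local-variable index dispatched through the wide prefix, and only the narrow encoding has a _nofast variant. A minimal standalone sketch of the same guard; the opcode values and the wide-prefix check here are simplified assumptions, not HotSpot's real tables or BytecodeStream API:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-ins; HotSpot's actual opcode numbering differs.
    enum Opcode : uint8_t { WIDE = 0xc4, ILOAD = 0x15, NOFAST_ILOAD = 0xe5 };

    // Rewrite iload -> nofast_iload in place, but leave a wide-prefixed iload
    // alone: only the narrow one-byte-index encoding has a nofast variant.
    void rewrite_iload_at(uint8_t* code, size_t i) {
        bool is_wide = (i > 0 && code[i - 1] == WIDE);
        if (code[i] == ILOAD && !is_wide) {
            code[i] = NOFAST_ILOAD;
        }
    }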

View file

@@ -1104,21 +1104,21 @@ void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_k, TRAP
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
                              InterpreterOopMap* entry_for) {
-  // Dirty read, then double-check under a lock.
-  if (_oop_map_cache == NULL) {
-    // Otherwise, allocate a new one.
+  // Lazily create the _oop_map_cache at first request
+  // Lock-free access requires load_ptr_acquire.
+  OopMapCache* oop_map_cache =
+    static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
-    // First time use. Allocate a cache in C heap
-    if (_oop_map_cache == NULL) {
-      // Release stores from OopMapCache constructor before assignment
-      // to _oop_map_cache. C++ compilers on ppc do not emit the
-      // required memory barrier only because of the volatile
-      // qualifier of _oop_map_cache.
-      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
+    // Check if _oop_map_cache was allocated while we were waiting for this lock
+    if ((oop_map_cache = _oop_map_cache) == NULL) {
+      oop_map_cache = new OopMapCache();
+      // Ensure _oop_map_cache is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
     }
   }
-  // _oop_map_cache is constant after init; lookup below does is own locking.
-  _oop_map_cache->lookup(method, bci, entry_for);
+  // _oop_map_cache is constant after init; lookup below does its own locking.
+  oop_map_cache->lookup(method, bci, entry_for);
 }
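The hunk above is the classic double-checked locking idiom: an acquire load on the lock-free fast path, a re-check under the lock, and a release store at publication so the object is fully constructed before its pointer becomes visible. A sketch of the same shape in portable C++11, where the atomic operations play the role of HotSpot's OrderAccess::load_ptr_acquire and release_store_ptr; the Cache type and names are illustrative only:

    #include <atomic>
    #include <mutex>

    struct Cache { /* expensive to construct */ };

    std::atomic<Cache*> g_cache{nullptr};
    std::mutex g_cache_lock;

    Cache* get_cache() {
        // Fast path: this acquire load pairs with the release store below.
        Cache* c = g_cache.load(std::memory_order_acquire);
        if (c == nullptr) {
            std::lock_guard<std::mutex> guard(g_cache_lock);
            // Re-check: another thread may have won the race while we waited.
            c = g_cache.load(std::memory_order_relaxed);
            if (c == nullptr) {
                c = new Cache();
                // Publish only after construction completes.
                g_cache.store(c, std::memory_order_release);
            }
        }
        return c;
    }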

View file

@@ -23,7 +23,10 @@
  */

 #include "precompiled.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"

 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
@@ -52,46 +55,41 @@ bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const
   if (all_committed()) return true;

   CommittedMemoryRegion committed_rgn(addr, size, stack);
-  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
-  if (node != NULL) {
+  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
+
+  while (node != NULL) {
     CommittedMemoryRegion* rgn = node->data();
     if (rgn->same_region(addr, size)) {
       return true;
     }

     if (rgn->adjacent_to(addr, size)) {
-      // check if the next region covers this committed region,
-      // the regions may not be merged due to different call stacks
-      LinkedListNode<CommittedMemoryRegion>* next =
-        node->next();
-      if (next != NULL && next->data()->contain_region(addr, size)) {
-        if (next->data()->same_region(addr, size)) {
-          next->data()->set_call_stack(stack);
-        }
-        return true;
-      }
-      if (rgn->call_stack()->equals(stack)) {
+      // special case to expand prior region if there is no next region
+      LinkedListNode<CommittedMemoryRegion>* next = node->next();
+      if (next == NULL && rgn->call_stack()->equals(stack)) {
         VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
         // the two adjacent regions have the same call stack, merge them
         rgn->expand_region(addr, size);
         VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
         return true;
       }
-      VirtualMemorySummary::record_committed_memory(size, flag());
-      if (rgn->base() > addr) {
-        return _committed_regions.insert_before(committed_rgn, node) != NULL;
-      } else {
-        return _committed_regions.insert_after(committed_rgn, node) != NULL;
-      }
     }
-    assert(rgn->contain_region(addr, size), "Must cover this region");
-    return true;
-  } else {
-    // New committed region
-    VirtualMemorySummary::record_committed_memory(size, flag());
-    return add_committed_region(committed_rgn);
-  }
+
+    if (rgn->overlap_region(addr, size)) {
+      // Clear a space for this region in the case it overlaps with any regions.
+      remove_uncommitted_region(addr, size);
+      break;  // commit below
+    }
+    if (rgn->end() >= addr + size){
+      break;
+    }
+    node = node->next();
+  }
+
+  // New committed region
+  VirtualMemorySummary::record_committed_memory(size, flag());
+  return add_committed_region(committed_rgn);
 }

 void ReservedMemoryRegion::set_all_committed(bool b) {
   if (all_committed() != b) {
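The rewritten loop replaces a single find_node() lookup with an ordered walk: expand an exactly adjacent predecessor only when the recorded call stacks match, clear any overlap first, and otherwise fall through to a plain insert into the sorted list. A sketch of that insertion strategy on a sorted interval list, using a minimal illustrative Region type; NMT's call-stack bookkeeping and accounting are elided, and the new range is assumed not to overlap anything already present:

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Region {
        uintptr_t base;
        size_t    size;
        uintptr_t end() const { return base + size; }
    };

    // Insert [addr, addr+size) into a list sorted by base address, expanding
    // an exactly adjacent predecessor in place when merging is allowed (in
    // NMT: when the call stacks are equal).
    void commit(std::list<Region>& regions, uintptr_t addr, size_t size, bool may_merge) {
        auto it = regions.begin();
        while (it != regions.end() && it->end() <= addr) {
            ++it;                                   // skip regions entirely below addr
        }
        if (may_merge && it != regions.begin() && std::prev(it)->end() == addr) {
            std::prev(it)->size += size;            // expand the prior region
            return;
        }
        regions.insert(it, Region{addr, size});     // otherwise keep the list sorted
    }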
@@ -175,48 +173,52 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
       }
     }
   } else {
-    // we have to walk whole list to remove the committed regions in
-    // specified range
-    LinkedListNode<CommittedMemoryRegion>* head =
-      _committed_regions.head();
-    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
-    VirtualMemoryRegion uncommitted_rgn(addr, sz);
-
-    while (head != NULL && !uncommitted_rgn.is_empty()) {
-      CommittedMemoryRegion* crgn = head->data();
-      // this committed region overlaps to region to uncommit
-      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // find matched region, remove the node will do
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
-          _committed_regions.remove_after(prev);
-          return true;
-        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // this committed region contains whole uncommitted region
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
-          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
-        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
-          // this committed region has been uncommitted
-          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
-          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
-          LinkedListNode<CommittedMemoryRegion>* tmp = head;
-          head = head->next();
-          _committed_regions.remove_after(prev);
-          continue;
-        } else if (crgn->contain_address(uncommitted_rgn.base())) {
-          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
-          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
-        } else if (uncommitted_rgn.contain_address(crgn->base())) {
-          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
-          crgn->exclude_region(crgn->base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
-            toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
-        }
-      }
+    CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
+    address end = addr + sz;
+
+    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+    CommittedMemoryRegion* crgn;
+
+    while (head != NULL) {
+      crgn = head->data();
+
+      if (crgn->same_region(addr, sz)) {
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        _committed_regions.remove_after(prev);
+        return true;
+      }
+
+      // del_rgn contains crgn
+      if (del_rgn.contain_region(crgn->base(), crgn->size())) {
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        head = head->next();
+        _committed_regions.remove_after(prev);
+        continue;  // don't update head or prev
+      }
+
+      // Found addr in the current crgn. There are 2 subcases:
+      if (crgn->contain_address(addr)) {
+        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
+        if (crgn->contain_address(end - 1)) {
+          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+          return remove_uncommitted_region(head, addr, sz);  // done!
+        } else {
+          // (2) Did not find del_rgn's end in crgn.
+          size_t size = crgn->end() - del_rgn.base();
+          crgn->exclude_region(addr, size);
+          VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        }
+      } else if (crgn->contain_address(end - 1)) {
+        // Found del_rgn's end, but not its base addr.
+        size_t size = del_rgn.end() - crgn->base();
+        crgn->exclude_region(crgn->base(), size);
+        VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        return true;  // should be done if the list is sorted properly!
+      }
+
       prev = head;
       head = head->next();
     }
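The rewrite enumerates the possible overlaps between the region to uncommit (del_rgn) and each committed region explicitly: exact match, full containment of the committed region, and a trim at either edge, with the middle punch-out delegated to the node-splitting remove_uncommitted_region() overload. The same case analysis as a self-contained sketch over half-open spans; the Span type is illustrative only:

    #include <cstddef>
    #include <cstdint>

    struct Span { uintptr_t b, e; };  // half-open [b, e)

    // Remove d from r; writes the surviving pieces (0, 1 or 2) into out and
    // returns how many there are. The n == 2 outcome is the split that the
    // node-splitting overload handles in the hunk above.
    int subtract(Span r, Span d, Span out[2]) {
        if (d.b <= r.b && r.e <= d.e) return 0;                  // d swallows r
        if (r.e <= d.b || d.e <= r.b) { out[0] = r; return 1; }  // no overlap
        int n = 0;
        if (r.b < d.b) out[n++] = Span{r.b, d.b};                // piece left of d
        if (d.e < r.e) out[n++] = Span{d.e, r.e};                // piece right of d
        return n;
    }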
@@ -386,7 +388,8 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->add_committed_region(addr, size, stack);
+  bool result = reserved_rgn->add_committed_region(addr, size, stack);
+  return result;
 }

 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
@@ -398,7 +401,8 @@ bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size)
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->remove_uncommitted_region(addr, size);
+  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
+  return result;
 }
 bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
@@ -488,5 +492,3 @@ bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel
   return true;
 }

View file

@@ -261,8 +261,7 @@ class CommittedMemoryRegion : public VirtualMemoryRegion {
     VirtualMemoryRegion(addr, size), _stack(stack) { }

   inline int compare(const CommittedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size()) ||
-        adjacent_to   (rgn.base(), rgn.size())) {
+    if (overlap_region(rgn.base(), rgn.size())) {
       return 0;
     } else {
       if (base() == rgn.base()) {
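With adjacent_to() dropped from compare(), two regions now compare equal only when they genuinely overlap, so adjacent regions recorded with different call stacks can coexist in the sorted list instead of being conflated by lookups. A sketch of the resulting comparator semantics on an illustrative Rgn type:

    #include <cstddef>
    #include <cstdint>

    struct Rgn {
        uintptr_t base;
        size_t    size;
        uintptr_t end() const { return base + size; }
    };

    // Only genuine overlap compares equal; disjoint regions, even touching
    // ones, are ordered by base address.
    int compare_rgn(const Rgn& a, const Rgn& b) {
        if (a.base < b.end() && b.base < a.end()) {
            return 0;                       // overlapping: treated as the same slot
        }
        return (a.base < b.base) ? -1 : 1;  // disjoint: keep both, in order
    }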

View file

@@ -93,12 +93,12 @@ static void print_flag_error_message_if_needed(Flag::Error error, const char* na

 // set a boolean global flag
 Flag::Error WriteableFlags::set_bool_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
-  int value = true;
-
-  if (sscanf(arg, "%d", &value)) {
-    return set_bool_flag(name, value != 0, origin, err_msg);
+  if ((strcasecmp(arg, "true") == 0) || (*arg == '1' && *(arg + 1) == 0)) {
+    return set_bool_flag(name, true, origin, err_msg);
+  } else if ((strcasecmp(arg, "false") == 0) || (*arg == '0' && *(arg + 1) == 0)) {
+    return set_bool_flag(name, false, origin, err_msg);
   }
-  err_msg.print("flag value must be a boolean (1 or 0)");
+  err_msg.print("flag value must be a boolean (1/0 or true/false)");
   return Flag::WRONG_FORMAT;
 }
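The replacement parser accepts exactly four spellings, where the old sscanf-based version treated any string with a leading integer (for example "42") as a valid boolean. The same strict check as a small portable sketch; strcasecmp is POSIX, as in the hunk above, and the function name is illustrative:

    #include <cstring>
    #include <strings.h>  // strcasecmp (POSIX)

    // Returns true if arg is a well-formed boolean and stores its value in out.
    bool parse_bool(const char* arg, bool& out) {
        if (strcasecmp(arg, "true") == 0 || std::strcmp(arg, "1") == 0) {
            out = true;
            return true;
        }
        if (strcasecmp(arg, "false") == 0 || std::strcmp(arg, "0") == 0) {
            out = false;
            return true;
        }
        return false;  // reject "42", "yes", "1x", empty strings, etc.
    }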

View file

@@ -259,6 +259,11 @@ template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
   virtual bool remove(LinkedListNode<E>* node) {
     LinkedListNode<E>* p = this->head();
+    if (p == node) {
+      this->set_head(p->next());
+      delete_node(node);
+      return true;
+    }
     while (p != NULL && p->next() != node) {
       p = p->next();
     }
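The inserted block fixes removal of the head node, which has no predecessor and so is never matched by the p->next() == node scan. The general shape as a self-contained sketch with an illustrative Node type:

    template <typename E>
    struct Node { E data; Node* next; };

    // Remove node from a singly linked list; the head needs the dedicated
    // branch that the inserted lines above add.
    template <typename E>
    bool remove_node(Node<E>*& head, Node<E>* node) {
        if (head == node) {
            head = node->next;               // move the head past the victim
            delete node;
            return true;
        }
        Node<E>* p = head;
        while (p != nullptr && p->next != node) {
            p = p->next;                     // find the predecessor
        }
        if (p == nullptr) {
            return false;                    // node is not in this list
        }
        p->next = node->next;                // unlink, then free
        delete node;
        return true;
    }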

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,22 +23,15 @@
 /**
  * @test TestSmallHeap
- * @bug 8067438
+ * @bug 8067438 8152239
  * @requires vm.gc=="null"
- * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
- * @requires vm.compMode != "Xcomp"
- * @requires vm.opt.UseCompressedOops != false
  * @summary Verify that starting the VM with a small heap works
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /test/lib/share/classes
  * @modules java.base/jdk.internal.misc
  * @modules java.management/sun.management
- * @ignore 8076621
  * @build TestSmallHeap
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseParallelGC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseSerialGC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseG1GC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseConcMarkSweepGC TestSmallHeap
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestSmallHeap
  */

 /* Note: It would be nice to verify the minimal supported heap size (2m) here,
@@ -60,23 +53,55 @@
  * So, the expected heap size is page_size * 512.
  */

-import jdk.test.lib.*;
-import com.sun.management.HotSpotDiagnosticMXBean;
-import java.lang.management.ManagementFactory;
-import static jdk.test.lib.Asserts.*;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+import java.util.LinkedList;
+
 import sun.hotspot.WhiteBox;

 public class TestSmallHeap {

-    public static void main(String[] args) {
-        // Do all work in the VM driving the test, the VM
-        // with the small heap size should do as little as
-        // possible to avoid hitting an OOME.
+    public static void main(String[] args) throws Exception {
         WhiteBox wb = WhiteBox.getWhiteBox();
         int pageSize = wb.getVMPageSize();
         int heapBytesPerCard = 512;
         long expectedMaxHeap = pageSize * heapBytesPerCard;
-        String maxHeap
-            = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class)
-                .getVMOption("MaxHeapSize").getValue();
-        assertEQ(Long.parseLong(maxHeap), expectedMaxHeap);
+
+        verifySmallHeapSize("-XX:+UseParallelGC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseSerialGC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseG1GC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseConcMarkSweepGC", expectedMaxHeap);
+    }
+
+    private static void verifySmallHeapSize(String gc, long expectedMaxHeap) throws Exception {
+        LinkedList<String> vmOptions = new LinkedList<>();
+        vmOptions.add(gc);
+        vmOptions.add("-Xmx2m");
+        vmOptions.add("-XX:+PrintFlagsFinal");
+        vmOptions.add(VerifyHeapSize.class.getName());
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[0]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+        analyzer.shouldHaveExitValue(0);
+
+        long maxHeapSize = Long.parseLong(analyzer.firstMatch("MaxHeapSize.+=\\s+(\\d+)", 1));
+        long actualHeapSize = Long.parseLong(analyzer.firstMatch(VerifyHeapSize.actualMsg + "(\\d+)", 1));
+        Asserts.assertEQ(maxHeapSize, expectedMaxHeap);
+        Asserts.assertLessThanOrEqual(actualHeapSize, maxHeapSize);
+    }
+}
+
+class VerifyHeapSize {
+    public static final String actualMsg = "Actual heap size: ";
+
+    public static void main(String args[]) {
+        // Avoid string concatenation
+        System.out.print(actualMsg);
+        System.out.println(Runtime.getRuntime().maxMemory());
     }
 }

View file

@@ -28,6 +28,7 @@
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires vm.opt.FlightRecorder != true
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules java.management

View file

@@ -25,6 +25,7 @@
  * @test TestLogging
  * @summary Check that a mixed GC is reflected in the gc logs
  * @requires vm.gc=="G1" | vm.gc=="null"
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib
  * @modules java.base/jdk.internal.misc
  * @modules java.management

View file

@@ -33,6 +33,7 @@ import sun.hotspot.WhiteBox;
  * @key stress
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 2G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set using multiple threads
  * @modules java.base/jdk.internal.misc

View file

@@ -30,6 +30,7 @@ import sun.hotspot.WhiteBox;
  * @bug 8146984 8147087
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 3G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set by creating a lot of cross region links
  * @modules java.base/jdk.internal.misc

View file

@@ -0,0 +1,144 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test commits of overlapping regions of memory.
* @key nmt jcmd
* @library /testlibrary /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @build CommitOverlappingRegions
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail CommitOverlappingRegions
*/
import jdk.test.lib.*;
import sun.hotspot.WhiteBox;
public class CommitOverlappingRegions {
public static WhiteBox wb = WhiteBox.getWhiteBox();
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
long size = 32 * 1024;
long addr = wb.NMTReserveMemory(8*size);
String pid = Long.toString(ProcessTools.getProcessId());
ProcessBuilder pb = new ProcessBuilder();
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
System.out.println("Address is " + Long.toHexString(addr));
// Start: . . . . . . . .
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
// Committing: * * * . . . . .
// Region: * * * . . . . .
// Expected Total: 3 x 32KB = 96KB
wb.NMTCommitMemory(addr + 0*size, 3*size);
// Committing: . . . . * * * .
// Region: * * * . * * * .
// Expected Total: 6 x 32KB = 192KB
wb.NMTCommitMemory(addr + 4*size, 3*size);
// Check output after first 2 commits.
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=192KB)");
// Committing: . . * * * . . .
// Region: * * * * * * * .
// Expected Total: 7 x 32KB = 224KB
wb.NMTCommitMemory(addr + 2*size, 3*size);
// Check output after overlapping commit.
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=224KB)");
// Uncommitting: * * * * * * * *
// Region: . . . . . . . .
// Expected Total: 0 x 32KB = 0KB
wb.NMTUncommitMemory(addr + 0*size, 8*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
// Committing: * * . . . . . .
// Region: * * . . . . . .
// Expected Total: 2 x 32KB = 64KB
wb.NMTCommitMemory(addr + 0*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=64KB)");
// Committing: . * * * . . . .
// Region: * * * * . . . .
// Expected Total: 4 x 32KB = 128KB
wb.NMTCommitMemory(addr + 1*size, 3*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=128KB)");
// Uncommitting: * * * . . . . .
// Region: . . . * . . . .
// Expected Total: 1 x 32KB = 32KB
wb.NMTUncommitMemory(addr + 0*size, 3*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=32KB)");
// Committing: . . . * * . . .
// Region: . . . * * . . .
// Expected Total: 2 x 32KB = 64KB
wb.NMTCommitMemory(addr + 3*size, 2*size);
System.out.println("Address is " + Long.toHexString(addr + 3*size));
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=64KB)");
// Committing: . . . . * * . .
// Region: . . . * * * . .
// Expected Total: 3 x 32KB = 96KB
wb.NMTCommitMemory(addr + 4*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=96KB)");
// Committing: . . . . . * * .
// Region: . . . * * * * .
// Expected Total: 4 x 32KB = 128KB
wb.NMTCommitMemory(addr + 5*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=128KB)");
// Committing: . . . . . . * *
// Region: . . . * * * * *
// Expected Total: 5 x 32KB = 160KB
wb.NMTCommitMemory(addr + 6*size, 2*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=160KB)");
// Uncommitting: * * * * * * * *
// Region: . . . . . . . .
// Expected Total: 0 x 32KB = 0KB
wb.NMTUncommitMemory(addr + 0*size, 8*size);
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB, committed=0KB)");
}
}
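As a cross-check of the "Expected Total" comments in this test: each chunk is 32KB out of 8 reserved, and the committed figure NMT should report is simply the number of '*' chunks times 32KB; after the first two commits, for instance, the diagram ***.***. gives 6 x 32KB = 192KB. A trivial sketch of that arithmetic:

    #include <cstdio>

    int main() {
        // Diagram after the first two commits above: chunks 0-2 and 4-6 committed.
        const char* diagram = "***.***.";
        int committed = 0;
        for (const char* p = diagram; *p != '\0'; ++p) {
            committed += (*p == '*');
        }
        std::printf("committed=%dKB\n", committed * 32);  // prints committed=192KB
        return 0;
    }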

View file

@@ -27,6 +27,7 @@
  * @modules java.base/jdk.internal.misc
  * @modules java.base/jdk.internal.vm.annotation
  * @build jdk.test.lib.*
+ * @run main/othervm -Xint ReservedStackTest
  * @run main/othervm -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
  */

@@ -196,9 +197,12 @@ public class ReservedStackTest {
         System.out.println("Test started execution at frame = " + (counter - deframe));
         String result = test.getResult();
         // The feature is not fully implemented on all platforms,
-        // corruptions are still possible
-        boolean supportedPlatform = Platform.isSolaris() || Platform.isOSX()
-            || (Platform.isLinux() && (Platform.isX86() || Platform.isX64()));
+        // corruptions are still possible.
+        boolean supportedPlatform =
+            Platform.isAix() ||
+            (Platform.isLinux() && (Platform.isPPC() || Platform.isX64() || Platform.isX86())) ||
+            Platform.isOSX() ||
+            Platform.isSolaris();
         if (supportedPlatform && !result.contains("PASSED")) {
             System.out.println(result);
             throw new Error(result);

View file

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test ReservedStackTestCompiler
* @summary Run ReservedStackTest with dedicated compilers C1 and C2.
* @requires vm.flavor == "server"
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* @modules java.base/jdk.internal.vm.annotation
* @build jdk.test.lib.* ReservedStackTest
* @run main/othervm -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
* @run main/othervm -XX:-TieredCompilation -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
*/
// Intentionally left blank. Just runs ReservedStackTest with @requires annotation.

View file

@@ -56,6 +56,25 @@ public class SetVMFlagTest {
         run(new JMXExecutor());
     }

+    private void setMutableFlagInternal(CommandExecutor executor, String flag,
+                                        boolean val, boolean isNumeric) {
+        String strFlagVal;
+        if (isNumeric) {
+            strFlagVal = val ? "1" : "0";
+        } else {
+            strFlagVal = val ? "true" : "false";
+        }
+
+        OutputAnalyzer out = executor.execute("VM.set_flag " + flag + " " + strFlagVal);
+        out.stderrShouldBeEmpty();
+
+        out = getAllFlags(executor);
+
+        String newFlagVal = out.firstMatch(MANAGEABLE_PATTERN.replace("(\\S+)", flag), 1);
+
+        assertNotEquals(newFlagVal, val ? "1" : "0");
+    }
+
     private void setMutableFlag(CommandExecutor executor) {
         OutputAnalyzer out = getAllFlags(executor);
         String flagName = out.firstMatch(MANAGEABLE_PATTERN, 1);
@@ -69,15 +88,8 @@ public class SetVMFlagTest {
         }

         Boolean blnVal = Boolean.parseBoolean(flagVal);

-        out = executor.execute("VM.set_flag " + flagName + " " + (blnVal ? 0 : 1));
-        out.stderrShouldBeEmpty();
-
-        out = getAllFlags(executor);
-
-        String newFlagVal = out.firstMatch(MANAGEABLE_PATTERN.replace("(\\S+)", flagName), 1);
-
-        assertNotEquals(newFlagVal, flagVal);
+        setMutableFlagInternal(executor, flagName, !blnVal, true);
+        setMutableFlagInternal(executor, flagName, blnVal, false);
     }

     private void setMutableFlagWithInvalidValue(CommandExecutor executor) {
@@ -95,7 +107,7 @@ public class SetVMFlagTest {
         // a boolean flag accepts only 0/1 as its value
         out = executor.execute("VM.set_flag " + flagName + " unexpected_value");
         out.stderrShouldBeEmpty();
-        out.stdoutShouldContain("flag value must be a boolean (1 or 0)");
+        out.stdoutShouldContain("flag value must be a boolean (1/0 or true/false)");
         out = getAllFlags(executor);