Dean Long 2016-09-20 16:34:45 -04:00
commit e2e8ee17d0
1861 changed files with 47312 additions and 11099 deletions

View file

@@ -376,3 +376,6 @@ e613affb88d178dc7c589f1679db113d589bddb4 jdk-9+130
 4d2a15091124488080d65848b704e25599b2aaeb jdk-9+131
 2e83d21d78cd9c1d52e6cd2599e9c8aa36ea1f52 jdk-9+132
 e17429a7e843c4a4ed3651458d0f950970edcbcc jdk-9+133
+a71210c0d9800eb6925b61ecd6198abd554f90ee jdk-9+134
+e384420383a5b79fa0012ebcb25d8f83cff7f777 jdk-9+135
+1b4b5d01aa11edf24b6fadbe3d2f3e411e3b02cd jdk-9+136

View file

@@ -376,3 +376,6 @@ d94d54a3192fea79234c3ac55cd0b4052d45e954 jdk-9+130
 8728756c2f70a79a90188f4019cfd6b9a275765c jdk-9+131
 a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132
 be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
+065724348690eda41fc69112278d8da6dcde548c jdk-9+134
+82b94cb5f342319d2cda77f9fa59703ad7fde576 jdk-9+135
+3ec350f5f32af249b59620d7e37b54bdcd77b233 jdk-9+136

View file

@@ -5095,7 +5095,7 @@ VS_SDK_PLATFORM_NAME_2013=
 #CUSTOM_AUTOCONF_INCLUDE

 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1470863189
+DATE_WHEN_GENERATED=1472718471

 ###############################################################################
 #
@@ -15944,6 +15944,8 @@ $as_echo "$COMPILE_TYPE" >&6; }
     HOTSPOT_TARGET_CPU_DEFINE=S390
   elif test "x$OPENJDK_TARGET_CPU" = xs390x; then
     HOTSPOT_TARGET_CPU_DEFINE=S390
+  elif test "x$OPENJDK_TARGET_CPU" != x; then
+    HOTSPOT_TARGET_CPU_DEFINE=$(echo $OPENJDK_TARGET_CPU | tr a-z A-Z)
   fi
@@ -16117,6 +16119,8 @@ $as_echo "$COMPILE_TYPE" >&6; }
     HOTSPOT_BUILD_CPU_DEFINE=S390
   elif test "x$OPENJDK_BUILD_CPU" = xs390x; then
     HOTSPOT_BUILD_CPU_DEFINE=S390
+  elif test "x$OPENJDK_BUILD_CPU" != x; then
+    HOTSPOT_BUILD_CPU_DEFINE=$(echo $OPENJDK_BUILD_CPU | tr a-z A-Z)
   fi

View file

@@ -454,6 +454,8 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
     HOTSPOT_$1_CPU_DEFINE=S390
   elif test "x$OPENJDK_$1_CPU" = xs390x; then
     HOTSPOT_$1_CPU_DEFINE=S390
+  elif test "x$OPENJDK_$1_CPU" != x; then
+    HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z)
   fi
   AC_SUBST(HOTSPOT_$1_CPU_DEFINE)

View file

@@ -376,3 +376,6 @@ c3e83ccab3bb1733ae903d681879a33f85ed465c jdk-9+129
 f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131
 1ab4b9399c4cba584f66c1c088188f2f565fbf9c jdk-9+132
 2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133
+1a497f5ca0cfd88115cc7daa8af8a62b8741caf2 jdk-9+134
+094d0db606db976045f594dba47d4593b715cc81 jdk-9+135
+aa053a3faf266c12b4fd5272da431a3e08e4a3e3 jdk-9+136

View file

@@ -536,3 +536,6 @@ e96b34b76d863ed1fa04e0eeb3f297ac17b490fd jdk-9+129
 943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131
 713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132
 a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
+b8b694c6b4d2ab0939aed7adaf0eec1ac321a085 jdk-9+134
+3b1c4562953db47e36b237a500f368d5c9746d47 jdk-9+135
+a20da289f646ee44440695b81abc0548330e4ca7 jdk-9+136

View file

@@ -67,6 +67,7 @@ JVM_FindSignal
 JVM_FreeMemory
 JVM_GC
 JVM_GetAllThreads
+JVM_GetAndClearReferencePendingList
 JVM_GetArrayElement
 JVM_GetArrayLength
 JVM_GetCallerClass
@@ -130,6 +131,7 @@ JVM_GetSystemPackages
 JVM_GetTemporaryDirectory
 JVM_GetVmArguments
 JVM_Halt
+JVM_HasReferencePendingList
 JVM_HoldsLock
 JVM_IHashCode
 JVM_InitProperties
@@ -179,6 +181,7 @@ JVM_SuspendThread
 JVM_ToStackTraceElement
 JVM_TotalMemory
 JVM_UnloadLibrary
+JVM_WaitForReferencePendingList
 JVM_Yield
 # Module related API's

View file

@@ -44,6 +44,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
     $(HOTSPOT_TOPDIR)/test/native_sanity \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
+    $(HOTSPOT_TOPDIR)/test/runtime/jni/checked \
    $(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
    $(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
    $(HOTSPOT_TOPDIR)/test/runtime/SameObject \

View file

@@ -326,7 +326,8 @@ void InterpreterMacroAssembler::push_i(Register r) {
 }

 void InterpreterMacroAssembler::push_l(Register r) {
-  str(r, pre(esp, 2 * -wordSize));
+  str(zr, pre(esp, -wordSize));
+  str(r, pre(esp, -wordSize));
 }

 void InterpreterMacroAssembler::pop_f(FloatRegister r) {
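The push_l change above stores zero into the otherwise-unused second expression-stack slot instead of leaving stale data behind. A minimal sketch of the idea as a toy stack model (names and layout are illustrative, not HotSpot code):

```cpp
#include <cstdint>

// Toy model of an interpreter expression stack: a 64-bit long occupies
// two word-sized slots, but only one holds the value on a 64-bit VM.
// Zeroing the unused slot (as the new push_l does with zr) keeps stale
// data from lingering where debugging tools or asserts might read it.
struct ToyExprStack {
  uintptr_t slots[16];
  int top = 16;                                 // grows downward, like esp

  void push_l(int64_t v) {
    slots[--top] = 0;                           // unused half: was left stale before
    slots[--top] = static_cast<uintptr_t>(v);   // value half
  }
  int64_t pop_l() {
    int64_t v = static_cast<int64_t>(slots[top++]);
    top++;                                      // discard the zeroed half
    return v;
  }
};
```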

View file

@@ -2041,6 +2041,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ verify_oop(r0);
   }

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
+  }
+
   if (!is_critical_native) {
     // reset handle block
     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));

View file

@@ -1355,6 +1355,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // reset_last_Java_frame
   __ reset_last_Java_frame(true);

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
+  }
+
   // reset handle block
   __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
   __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
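Both aarch64 stubs above now clear the per-thread "pending JNI exception check" slot whenever control returns from native code, so a stale -Xcheck:jni record cannot trigger a spurious warning later. A rough model of that bookkeeping (hypothetical names, not the real JavaThread layout):

```cpp
// Sketch of the -Xcheck:jni bookkeeping these stubs maintain: a
// per-thread slot records which JNI function raised an exception, and
// the native wrapper zeroes it on every return from native code,
// mirroring "__ str(zr, Address(rthread, ...))" above.
struct ToyThread {
  const char* pending_jni_exception_check_fn = nullptr;
};

// Set by the checked-JNI entry points when an exception is pending.
void set_pending_check(ToyThread* t, const char* jni_fn) {
  t->pending_jni_exception_check_fn = jni_fn;
}

// Run in the wrapper epilogue; guarded by the CheckJNICalls flag.
void native_wrapper_epilogue(ToyThread* t, bool check_jni_calls) {
  if (check_jni_calls) {
    t->pending_jni_exception_check_fn = nullptr;  // store NULL/zero
  }
}
```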

View file

@@ -57,10 +57,12 @@ define_pd_global(intx, InlineSmallCode, 1500);

 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
+define_pd_global(intx, CompilerThreadStackSize, 1024);
 define_pd_global(intx, ThreadStackSize, 1024);
 define_pd_global(intx, VMThreadStackSize, 1024);
 #define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
 #else
+define_pd_global(intx, CompilerThreadStackSize, 512);
 define_pd_global(intx, ThreadStackSize, 512);
 define_pd_global(intx, VMThreadStackSize, 512);
 #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))

View file

@@ -359,7 +359,7 @@ void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, in
 #ifdef _LP64
   stx(l, r1, offset);
   // store something more useful here
-  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
+  stx(G0, r1, offset+Interpreter::stackElementSize);
 #else
   st(l, r1, offset);
   st(l->successor(), r1, offset + Interpreter::stackElementSize);

View file

@@ -2765,6 +2765,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ verify_oop(I0);
   }

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
+  }
+
   if (!is_critical_native) {
     // reset handle block
     __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);

View file

@@ -1487,6 +1487,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   __ set(_thread_in_Java, G3_scratch);
   __ st(G3_scratch, thread_state);

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
+  }
+
   // reset handle block
   __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
   __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

View file

@@ -65,10 +65,10 @@ define_pd_global(intx, InlineSmallCode, 1000);
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2))
+#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(7) DEBUG_ONLY(+2))
 // For those clients that do not use write socket, we allow
 // the min range value to be below that of the default
-#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(6) DEBUG_ONLY(+2))
+#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(7) DEBUG_ONLY(+2))
 #else
 #define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
 #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
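The hunk above grows the Win64 shadow zone from 6 to 7 pages. Back-of-the-envelope arithmetic for what that means in bytes, assuming 4K pages (the real page size comes from the OS at runtime):

```cpp
#include <cstdio>

// Rough arithmetic behind the macro change: on 64-bit Windows the
// default shadow zone grows from 6 to 7 pages, plus 2 more in debug
// builds (DEBUG_ONLY(+2)). Page size assumed, not queried.
int main() {
  const int page = 4 * 1024;      // assumed 4K pages
  const int release_pages = 7;    // WIN64_ONLY(7)
  const int debug_pages = 7 + 2;  // WIN64_ONLY(7) DEBUG_ONLY(+2)
  std::printf("win64 shadow zone: %d KB release, %d KB debug\n",
              release_pages * page / 1024, debug_pages * page / 1024);
}
```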

View file

@@ -611,7 +611,8 @@ void InterpreterMacroAssembler::pop_l(Register r) {

 void InterpreterMacroAssembler::push_l(Register r) {
   subptr(rsp, 2 * wordSize);
-  movq(Address(rsp, 0), r);
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r );
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD );
 }

 void InterpreterMacroAssembler::pop(TosState state) {

View file

@@ -2236,6 +2236,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ verify_oop(rax);
   }

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
+  }
+
   if (!is_critical_native) {
     // reset handle block
     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));

View file

@@ -2589,6 +2589,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ verify_oop(rax);
   }

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
+  }
+
   if (!is_critical_native) {
     // reset handle block
     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));

View file

@@ -1169,6 +1169,11 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // reset_last_Java_frame
   __ reset_last_Java_frame(thread, true);

+  if (CheckJNICalls) {
+    // clear_pending_jni_exception_check
+    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
+  }
+
   // reset handle block
   __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
   __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

View file

@@ -68,6 +68,7 @@ public class InstanceKlass extends Klass {
     Type type = db.lookupType("InstanceKlass");
     arrayKlasses = new MetadataField(type.getAddressField("_array_klasses"), 0);
     methods = type.getAddressField("_methods");
+    defaultMethods = type.getAddressField("_default_methods");
     methodOrdering = type.getAddressField("_method_ordering");
     localInterfaces = type.getAddressField("_local_interfaces");
     transitiveInterfaces = type.getAddressField("_transitive_interfaces");
@@ -128,6 +129,7 @@ public class InstanceKlass extends Klass {
   private static MetadataField arrayKlasses;
   private static AddressField  methods;
+  private static AddressField  defaultMethods;
   private static AddressField  methodOrdering;
   private static AddressField  localInterfaces;
   private static AddressField  transitiveInterfaces;
@@ -335,6 +337,20 @@ public class InstanceKlass extends Klass {
   // Accessors for declared fields
   public Klass getArrayKlasses() { return (Klass) arrayKlasses.getValue(this); }
   public MethodArray  getMethods() { return new MethodArray(methods.getValue(getAddress())); }
+
+  public MethodArray  getDefaultMethods() {
+    if (defaultMethods != null) {
+      Address addr = defaultMethods.getValue(getAddress());
+      if ((addr != null) && (addr.getAddressAt(0) != null)) {
+        return new MethodArray(addr);
+      } else {
+        return null;
+      }
+    } else {
+      return null;
+    }
+  }
+
   public KlassArray   getLocalInterfaces() { return new KlassArray(localInterfaces.getValue(getAddress())); }
   public KlassArray   getTransitiveInterfaces() { return new KlassArray(transitiveInterfaces.getValue(getAddress())); }
   public int       getJavaFieldsCount() { return (int) javaFieldsCount.getValue(this); }

View file

@@ -36,6 +36,7 @@ import sun.jvm.hotspot.utilities.*;
 public class MethodData extends Metadata implements MethodDataInterface<Klass,Method> {
   static int TypeProfileWidth = 2;
   static int BciProfileWidth = 2;
+  static int MethodProfileWidth = 0;
   static int CompileThreshold;

   static int Reason_many;                 // indicates presence of several reasons
@@ -142,6 +143,8 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
         TypeProfileWidth = (int)flag.getIntx();
       } else if (flag.getName().equals("BciProfileWidth")) {
         BciProfileWidth = (int)flag.getIntx();
+      } else if (flag.getName().equals("MethodProfileWidth")) {
+        MethodProfileWidth = (int)flag.getIntx();
       } else if (flag.getName().equals("CompileThreshold")) {
         CompileThreshold = (int)flag.getIntx();
       }
@@ -154,7 +157,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me

     parametersTypeDataDi = new CIntField(type.getCIntegerField("_parameters_type_data_di"), 0);

-    sizeofMethodDataOopDesc = (int)type.getSize();;
+    sizeofMethodDataOopDesc = (int)type.getSize();

     Reason_many = db.lookupIntConstant("Deoptimization::Reason_many").intValue();
     Reason_none = db.lookupIntConstant("Deoptimization::Reason_none").intValue();
@@ -257,7 +260,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
   ParametersTypeData<Klass,Method> parametersTypeData() {
     int di = (int)parametersTypeDataDi.getValue(getAddress());
-    if (di == -1) {
+    if (di == -1 || di == -2) {
       return null;
     }
     DataLayout dataLayout = new DataLayout(this, di + (int)data.getOffset());

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,9 +38,21 @@ import sun.jvm.hotspot.utilities.*;
 // that the check is reached, and a series of (Klass, count) pairs
 // which are used to store a type profile for the receiver of the check.
 public class ReceiverTypeData<K,M> extends CounterData {
-  static final int receiver0Offset = counterCellCount;
-  static final int count0Offset = receiver0Offset + 1;
-  static final int receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
+  static final int INCLUDE_JVMCI;
+  static final int nonProfiledCountOffset = counterCellCount;
+  static final int receiver0Offset;
+  static final int count0Offset;
+  static final int receiverTypeRowCellCount;
+  static {
+    INCLUDE_JVMCI = VM.getVM().getTypeDataBase().lookupIntConstant("INCLUDE_JVMCI");
+    if (INCLUDE_JVMCI == 1) {
+      receiver0Offset = nonProfiledCountOffset + 1;
+    } else {
+      receiver0Offset = counterCellCount;
+    }
+    count0Offset = receiver0Offset + 1;
+    receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
+  }
   final MethodDataInterface<K,M> methodData;

   public ReceiverTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
@@ -53,7 +65,11 @@ public class ReceiverTypeData<K,M> extends CounterData {
   boolean isReceivertypedata() { return true; }

   static int staticCellCount() {
-    return counterCellCount + MethodData.TypeProfileWidth * receiverTypeRowCellCount;
+    int cellCount = counterCellCount + MethodData.TypeProfileWidth * receiverTypeRowCellCount;
+    if (INCLUDE_JVMCI == 1) {
+      cellCount += 1;
+    }
+    return cellCount;
   }

   public int cellCount() {
View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,11 @@ public class VirtualCallData<K,M> extends ReceiverTypeData<K,M> {
   static int staticCellCount() {
     // At this point we could add more profile state, e.g., for arguments.
     // But for now it's the same size as the base record type.
-    return ReceiverTypeData.staticCellCount();
+    int cellCount = ReceiverTypeData.staticCellCount();
+    if (INCLUDE_JVMCI == 1) {
+      cellCount += MethodData.MethodProfileWidth * receiverTypeRowCellCount;
+    }
+    return cellCount;
   }

   public int cellCount() {

View file

@@ -129,8 +129,6 @@ public class Threads {
       virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
       virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
     }
-    // for now, use JavaThread itself. fix it later with appropriate class if needed
-    virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class);
     virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
     virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
   }
@@ -172,7 +170,7 @@ public class Threads {
       return thread;
     } catch (Exception e) {
       throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
-                                 " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e);
+                                 " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread or CodeCacheSweeperThread)", e);
     }
   }

View file

@@ -65,4 +65,7 @@ public class GrowableArray<T> extends GenericGrowableArray {
     super(addr);
     virtualConstructor = v;
   }
+  public Address getData() {
+    return dataField.getValue(getAddress());
+  }
 }

View file

@@ -837,7 +837,6 @@ vmType2Class["InterpreterCodelet"] = sapkg.interpreter.InterpreterCodelet;
 vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
 vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
 vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
-vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread;
 vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;

 // gc

View file

@@ -847,7 +847,8 @@ static void *thread_native_entry(Thread *thread) {
   return 0;
 }

-bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+bool os::create_thread(Thread* thread, ThreadType thr_type,
+                       size_t req_stack_size) {

   assert(thread->osthread() == NULL, "caller responsible");
@@ -880,37 +881,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

   // calculate stack size if it's not specified by caller
-  if (stack_size == 0) {
-    stack_size = os::Aix::default_stack_size(thr_type);
-
-    switch (thr_type) {
-    case os::java_thread:
-      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
-      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
-      stack_size = JavaThread::stack_size_at_create();
-      break;
-    case os::compiler_thread:
-      if (CompilerThreadStackSize > 0) {
-        stack_size = (size_t)(CompilerThreadStackSize * K);
-        break;
-      } // else fall through:
-        // use VMThreadStackSize if CompilerThreadStackSize is not defined
-    case os::vm_thread:
-    case os::pgc_thread:
-    case os::cgc_thread:
-    case os::watcher_thread:
-      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
-      break;
-    }
-  }
-
-  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
+  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
   pthread_attr_setstacksize(&attr, stack_size);

   pthread_t tid;
   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

   char buf[64];
   if (ret == 0) {
     log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
@@ -3593,32 +3569,11 @@ jint os::init_2(void) {
   Aix::signal_sets_init();
   Aix::install_signal_handlers();

-  // Check minimum allowable stack size for thread creation and to initialize
-  // the java system classes, including StackOverflowError - depends on page
-  // size.  Add two 4K pages for compiler2 recursion in main thread.
-  // Add in 4*BytesPerWord 4K pages to account for VM stack during
-  // class initialization depending on 32 or 64 bit VM.
-  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
-                                    JavaThread::stack_guard_zone_size() +
-                                    JavaThread::stack_shadow_zone_size() +
-                                    (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
-
-  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::vm_page_size());
-
-  size_t threadStackSizeInBytes = ThreadStackSize * K;
-  if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
-    tty->print_cr("\nThe stack size specified is too small, "
-                  "Specify at least %dk",
-                  os::Aix::min_stack_allowed / K);
+  // Check and sets minimum stack sizes against command line options
+  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }

-  // Make the stack size a multiple of the page size so that
-  // the yellow/red zones can be guarded.
-  // Note that this can be 0, if no default stacksize was set.
-  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
-
   if (UseNUMA) {
     UseNUMA = false;
     warning("NUMA optimizations are not available on this OS.");

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -140,14 +140,6 @@ class Aix {
   // libpthread version string
   static void libpthread_init();

-  // Minimum stack size a thread can be created with (allowing
-  // the VM to completely create the thread and enter user code)
-  static size_t min_stack_allowed;
-
-  // Return default stack size or guard size for the specified thread type
-  static size_t default_stack_size(os::ThreadType thr_type);
-  static size_t default_guard_size(os::ThreadType thr_type);
-
   // Function returns true if we run on OS/400 (pase), false if we run
   // on AIX.
   static bool on_pase() {

View file

@@ -734,7 +734,8 @@ static void *thread_native_entry(Thread *thread) {
   return 0;
 }

-bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+bool os::create_thread(Thread* thread, ThreadType thr_type,
+                       size_t req_stack_size) {
   assert(thread->osthread() == NULL, "caller responsible");

   // Allocate the OSThread object
@@ -757,32 +758,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

   // calculate stack size if it's not specified by caller
-  if (stack_size == 0) {
-    stack_size = os::Bsd::default_stack_size(thr_type);
-
-    switch (thr_type) {
-    case os::java_thread:
-      // Java threads use ThreadStackSize which default value can be
-      // changed with the flag -Xss
-      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
-      stack_size = JavaThread::stack_size_at_create();
-      break;
-    case os::compiler_thread:
-      if (CompilerThreadStackSize > 0) {
-        stack_size = (size_t)(CompilerThreadStackSize * K);
-        break;
-      } // else fall through:
-        // use VMThreadStackSize if CompilerThreadStackSize is not defined
-    case os::vm_thread:
-    case os::pgc_thread:
-    case os::cgc_thread:
-    case os::watcher_thread:
-      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
-      break;
-    }
-  }
-
-  stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed);
+  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
   pthread_attr_setstacksize(&attr, stack_size);

   ThreadState state;
@@ -3502,32 +3478,11 @@ jint os::init_2(void) {
   Bsd::signal_sets_init();
   Bsd::install_signal_handlers();

-  // Check minimum allowable stack size for thread creation and to initialize
-  // the java system classes, including StackOverflowError - depends on page
-  // size.  Add two 4K pages for compiler2 recursion in main thread.
-  // Add in 4*BytesPerWord 4K pages to account for VM stack during
-  // class initialization depending on 32 or 64 bit VM.
-  os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-                                    JavaThread::stack_guard_zone_size() +
-                                    JavaThread::stack_shadow_zone_size() +
-                                    (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
-
-  os::Bsd::min_stack_allowed = align_size_up(os::Bsd::min_stack_allowed, os::vm_page_size());
-
-  size_t threadStackSizeInBytes = ThreadStackSize * K;
-  if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
-    tty->print_cr("\nThe stack size specified is too small, "
-                  "Specify at least %dk",
-                  os::Bsd::min_stack_allowed/ K);
+  // Check and sets minimum stack sizes against command line options
+  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }

-  // Make the stack size a multiple of the page size so that
-  // the yellow/red zones can be guarded.
-  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-                                                vm_page_size()));
-
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
     // if getrlimit/setrlimit fails but continue regardless.

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,14 +120,6 @@ class Bsd {
   static struct sigaction *get_chained_signal_action(int sig);
   static bool chained_handler(int sig, siginfo_t* siginfo, void* context);

-  // Minimum stack size a thread can be created with (allowing
-  // the VM to completely create the thread and enter user code)
-  static size_t min_stack_allowed;
-
-  // Return default stack size or guard size for the specified thread type
-  static size_t default_stack_size(os::ThreadType thr_type);
-  static size_t default_guard_size(os::ThreadType thr_type);
-
   // Real-time clock functions
   static void clock_init(void);

View file

@@ -701,7 +701,7 @@ static void *thread_native_entry(Thread *thread) {
 }

 bool os::create_thread(Thread* thread, ThreadType thr_type,
-                       size_t stack_size) {
+                       size_t req_stack_size) {
   assert(thread->osthread() == NULL, "caller responsible");

   // Allocate the OSThread object
@@ -723,34 +723,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
   pthread_attr_init(&attr);
   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

-  // stack size
   // calculate stack size if it's not specified by caller
-  if (stack_size == 0) {
-    stack_size = os::Linux::default_stack_size(thr_type);
-
-    switch (thr_type) {
-    case os::java_thread:
-      // Java threads use ThreadStackSize which default value can be
-      // changed with the flag -Xss
-      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
-      stack_size = JavaThread::stack_size_at_create();
-      break;
-    case os::compiler_thread:
-      if (CompilerThreadStackSize > 0) {
-        stack_size = (size_t)(CompilerThreadStackSize * K);
-        break;
-      } // else fall through:
-        // use VMThreadStackSize if CompilerThreadStackSize is not defined
-    case os::vm_thread:
-    case os::pgc_thread:
-    case os::cgc_thread:
-    case os::watcher_thread:
-      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
-      break;
-    }
-  }
-
-  stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
+  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
   pthread_attr_setstacksize(&attr, stack_size);

   // glibc guard page
@@ -956,10 +930,9 @@ static bool find_vma(address addr, address* vma_low, address* vma_high) {
 // bogus value for initial thread.
 void os::Linux::capture_initial_stack(size_t max_size) {
   // stack size is the easy part, get it from RLIMIT_STACK
-  size_t stack_size;
   struct rlimit rlim;
   getrlimit(RLIMIT_STACK, &rlim);
-  stack_size = rlim.rlim_cur;
+  size_t stack_size = rlim.rlim_cur;

   // 6308388: a bug in ld.so will relocate its own .data section to the
   //   lower end of primordial stack; reduce ulimit -s value a little bit
@@ -2875,7 +2848,7 @@ void os::Linux::rebuild_cpu_to_node_map() {
   // in the library.
   const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

-  size_t cpu_num = os::active_processor_count();
+  size_t cpu_num = processor_count();
   size_t cpu_map_size = NCPUS / BitsPerCLong;
   size_t cpu_map_valid_size =
     MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
@@ -4793,32 +4766,10 @@ jint os::init_2(void) {
   Linux::signal_sets_init();
   Linux::install_signal_handlers();

-  // Check minimum allowable stack size for thread creation and to initialize
-  // the java system classes, including StackOverflowError - depends on page
-  // size.  Add two 4K pages for compiler2 recursion in main thread.
-  // Add in 4*BytesPerWord 4K pages to account for VM stack during
-  // class initialization depending on 32 or 64 bit VM.
-  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-                                      JavaThread::stack_guard_zone_size() +
-                                      JavaThread::stack_shadow_zone_size() +
-                                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
-
-  os::Linux::min_stack_allowed = align_size_up(os::Linux::min_stack_allowed, os::vm_page_size());
-
-  size_t threadStackSizeInBytes = ThreadStackSize * K;
-  if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
-    tty->print_cr("\nThe stack size specified is too small, "
-                  "Specify at least " SIZE_FORMAT "k",
-                  os::Linux::min_stack_allowed/ K);
+  // Check and sets minimum stack sizes against command line options
+  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }

-  // Make the stack size a multiple of the page size so that
-  // the yellow/red zones can be guarded.
-  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-                                                vm_page_size()));
-
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());

 #if defined(IA32)
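Alongside the stack-size rework, this file also sizes the NUMA CPU bitmap by ceiling division over the bits in a long. That sizing logic in isolation (a standalone sketch; the CPU count here is made up):

```cpp
#include <climits>
#include <cstdio>

// The cpu_map sizing above is plain ceiling division: how many longs
// are needed so every CPU gets one bit in the bitmap.
int main() {
  const size_t bits_per_clong = sizeof(long) * CHAR_BIT;
  size_t cpu_num = 12;  // example count; the VM queries this at runtime
  size_t valid = (cpu_num + bits_per_clong - 1) / bits_per_clong;
  std::printf("%zu CPUs -> %zu long(s) in the bitmap\n", cpu_num, valid);
}
```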

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -170,12 +170,8 @@ class Linux {
   static void libpthread_init();
   static bool libnuma_init();
   static void* libnuma_dlsym(void* handle, const char* name);
-  // Minimum stack size a thread can be created with (allowing
-  // the VM to completely create the thread and enter user code)
-  static size_t min_stack_allowed;

-  // Return default stack size or guard size for the specified thread type
-  static size_t default_stack_size(os::ThreadType thr_type);
+  // Return default guard size for the specified thread type
   static size_t default_guard_size(os::ThreadType thr_type);

   static void capture_initial_stack(size_t max_size);

View file

@@ -1099,6 +1099,123 @@ char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_a
   return buf;
 }

+// Check minimum allowable stack sizes for thread creation and to initialize
+// the java system classes, including StackOverflowError - depends on page
+// size.  Add two 4K pages for compiler2 recursion in main thread.
+// Add in 4*BytesPerWord 4K pages to account for VM stack during
+// class initialization depending on 32 or 64 bit VM.
+jint os::Posix::set_minimum_stack_sizes() {
+  _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed,
+                                        JavaThread::stack_guard_zone_size() +
+                                        JavaThread::stack_shadow_zone_size() +
+                                        (4 * BytesPerWord COMPILER2_PRESENT(+ 2)) * 4 * K);
+
+  _java_thread_min_stack_allowed = align_size_up(_java_thread_min_stack_allowed, vm_page_size());
+
+  size_t stack_size_in_bytes = ThreadStackSize * K;
+  if (stack_size_in_bytes != 0 &&
+      stack_size_in_bytes < _java_thread_min_stack_allowed) {
+    // The '-Xss' and '-XX:ThreadStackSize=N' options both set
+    // ThreadStackSize so we go with "Java thread stack size" instead
+    // of "ThreadStackSize" to be more friendly.
+    tty->print_cr("\nThe Java thread stack size specified is too small. "
+                  "Specify at least " SIZE_FORMAT "k",
+                  _java_thread_min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
+#ifdef SOLARIS
+  // For 64kbps there will be a 64kb page size, which makes
+  // the usable default stack size quite a bit less.  Increase the
+  // stack for 64kb (or any > than 8kb) pages, this increases
+  // virtual memory fragmentation (since we're not creating the
+  // stack on a power of 2 boundary.  The real fix for this
+  // should be to fix the guard page mechanism.
+
+  if (vm_page_size() > 8*K) {
+    stack_size_in_bytes = (stack_size_in_bytes != 0)
+       ? stack_size_in_bytes +
+         JavaThread::stack_red_zone_size() +
+         JavaThread::stack_yellow_zone_size()
+       : 0;
+    ThreadStackSize = stack_size_in_bytes/K;
+  }
+#endif // SOLARIS
+
+  // Make the stack size a multiple of the page size so that
+  // the yellow/red zones can be guarded.
+  JavaThread::set_stack_size_at_create(round_to(stack_size_in_bytes,
+                                                vm_page_size()));
+
+  _compiler_thread_min_stack_allowed = align_size_up(_compiler_thread_min_stack_allowed, vm_page_size());
+
+  stack_size_in_bytes = CompilerThreadStackSize * K;
+  if (stack_size_in_bytes != 0 &&
+      stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
+    tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
+                  "Specify at least " SIZE_FORMAT "k",
+                  _compiler_thread_min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
+  _vm_internal_thread_min_stack_allowed = align_size_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
+
+  stack_size_in_bytes = VMThreadStackSize * K;
+  if (stack_size_in_bytes != 0 &&
+      stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
+    tty->print_cr("\nThe VMThreadStackSize specified is too small. "
+                  "Specify at least " SIZE_FORMAT "k",
+                  _vm_internal_thread_min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
+  return JNI_OK;
+}
+
+// Called when creating the thread.  The minimum stack sizes have already been calculated
+size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
+  size_t stack_size;
+  if (req_stack_size == 0) {
+    stack_size = default_stack_size(thr_type);
+  } else {
+    stack_size = req_stack_size;
+  }
+
+  switch (thr_type) {
+  case os::java_thread:
+    // Java threads use ThreadStackSize which default value can be
+    // changed with the flag -Xss
+    if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
+      // no requested size and we have a more specific default value
+      stack_size = JavaThread::stack_size_at_create();
+    }
+    stack_size = MAX2(stack_size,
+                      _java_thread_min_stack_allowed);
+    break;
+  case os::compiler_thread:
+    if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
+      // no requested size and we have a more specific default value
+      stack_size = (size_t)(CompilerThreadStackSize * K);
+    }
+    stack_size = MAX2(stack_size,
+                      _compiler_thread_min_stack_allowed);
+    break;
+  case os::vm_thread:
+  case os::pgc_thread:
+  case os::cgc_thread:
+  case os::watcher_thread:
+  default:  // presume the unknown thr_type is a VM internal
+    if (req_stack_size == 0 && VMThreadStackSize > 0) {
+      // no requested size and we have a more specific default value
+      stack_size = (size_t)(VMThreadStackSize * K);
+    }
+    stack_size = MAX2(stack_size,
+                      _vm_internal_thread_min_stack_allowed);
+    break;
+  }
+
+  return stack_size;
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
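The new `os::Posix::set_minimum_stack_sizes` centralizes the minimum-stack arithmetic that the per-OS files used to duplicate. A worked example of the Java-thread minimum under assumed LP64, 4K-page, C2 product-build numbers (the real zone sizes come from JavaThread at runtime, so these values are only illustrative):

```cpp
#include <cstdio>

// Worked example of the minimum computed above: guard zone + shadow
// zone + (4*BytesPerWord [+2 with C2]) 4K pages, rounded up to the
// page size. All inputs below are assumptions, not HotSpot constants.
int main() {
  const size_t K = 1024, page = 4 * K;
  const size_t bytes_per_word = 8;   // assumption: LP64
  const size_t guard  = 4 * page;    // e.g. red + yellow + reserved pages
  const size_t shadow = 20 * page;   // e.g. DEFAULT_STACK_SHADOW_PAGES(20)
  size_t min_allowed = guard + shadow + (4 * bytes_per_word + 2) * 4 * K;
  min_allowed = (min_allowed + page - 1) / page * page;  // align_size_up
  std::printf("java thread minimum: %zuk\n", min_allowed / K);
}
```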

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,18 @@ protected:
   static void print_libversion_info(outputStream* st);
   static void print_load_average(outputStream* st);

+  // Minimum stack size a thread can be created with (allowing
+  // the VM to completely create the thread and enter user code)
+  static size_t _compiler_thread_min_stack_allowed;
+  static size_t _java_thread_min_stack_allowed;
+  static size_t _vm_internal_thread_min_stack_allowed;
+
 public:
+  // Return default stack size for the specified thread type
+  static size_t default_stack_size(os::ThreadType thr_type);
+  // Check and sets minimum stack sizes
+  static jint set_minimum_stack_sizes();
+  static size_t get_initial_stack_size(ThreadType thr_type, size_t req_stack_size);
+
   // Returns true if signal is valid.
   static bool is_valid_signal(int sig);

View file

@@ -917,8 +917,15 @@ static char* describe_thr_create_attributes(char* buf, size_t buflen,
   return buf;
 }

+// return default stack size for thr_type
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
+  // default stack size when not specified by caller is 1M (2M for LP64)
+  size_t s = (BytesPerWord >> 2) * K * K;
+  return s;
+}
+
 bool os::create_thread(Thread* thread, ThreadType thr_type,
-                       size_t stack_size) {
+                       size_t req_stack_size) {
   // Allocate the OSThread object
   OSThread* osthread = new OSThread(NULL, NULL);
   if (osthread == NULL) {
@@ -953,31 +960,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
   }

-  // Calculate stack size if it's not specified by caller.
-  if (stack_size == 0) {
-    // The default stack size 1M (2M for LP64).
-    stack_size = (BytesPerWord >> 2) * K * K;
-
-    switch (thr_type) {
-    case os::java_thread:
-      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
-      break;
-    case os::compiler_thread:
-      if (CompilerThreadStackSize > 0) {
-        stack_size = (size_t)(CompilerThreadStackSize * K);
-        break;
-      } // else fall through:
-        // use VMThreadStackSize if CompilerThreadStackSize is not defined
-    case os::vm_thread:
-    case os::pgc_thread:
-    case os::cgc_thread:
-    case os::watcher_thread:
-      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
-      break;
-    }
-  }
-  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
+  // calculate stack size if it's not specified by caller
+  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);

   // Initial state is ALLOCATED but not INITIALIZED
   osthread->set_state(ALLOCATED);
@@ -4400,7 +4384,12 @@ void os::init(void) {
   // Constant minimum stack size allowed. It must be at least
   // the minimum of what the OS supports (thr_min_stack()), and
   // enough to allow the thread to get to user bytecode execution.
-  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
+  Posix::_compiler_thread_min_stack_allowed = MAX2(thr_min_stack(),
+                                                   Posix::_compiler_thread_min_stack_allowed);
+  Posix::_java_thread_min_stack_allowed = MAX2(thr_min_stack(),
+                                               Posix::_java_thread_min_stack_allowed);
+  Posix::_vm_internal_thread_min_stack_allowed = MAX2(thr_min_stack(),
+                                                      Posix::_vm_internal_thread_min_stack_allowed);

   // dynamic lookup of functions that may not be available in our lowest
   // supported Solaris release
@@ -4445,47 +4434,11 @@ jint os::init_2(void) {
     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
   }

-  // Check minimum allowable stack size for thread creation and to initialize
-  // the java system classes, including StackOverflowError - depends on page
-  // size.  Add two 4K pages for compiler2 recursion in main thread.
-  // Add in 4*BytesPerWord 4K pages to account for VM stack during
-  // class initialization depending on 32 or 64 bit VM.
-  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-                                        JavaThread::stack_guard_zone_size() +
-                                        JavaThread::stack_shadow_zone_size() +
-                                        (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
-
-  os::Solaris::min_stack_allowed = align_size_up(os::Solaris::min_stack_allowed, os::vm_page_size());
-
-  size_t threadStackSizeInBytes = ThreadStackSize * K;
-  if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
-    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  os::Solaris::min_stack_allowed/K);
+  // Check and sets minimum stack sizes against command line options
+  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }

-  // For 64kbps there will be a 64kb page size, which makes
-  // the usable default stack size quite a bit less.  Increase the
-  // stack for 64kb (or any > than 8kb) pages, this increases
-  // virtual memory fragmentation (since we're not creating the
-  // stack on a power of 2 boundary.  The real fix for this
-  // should be to fix the guard page mechanism.
-
-  if (vm_page_size() > 8*K) {
-    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
-       ? threadStackSizeInBytes +
-         JavaThread::stack_red_zone_size() +
-         JavaThread::stack_yellow_zone_size()
-       : 0;
-    ThreadStackSize = threadStackSizeInBytes/K;
-  }
-
-  // Make the stack size a multiple of the page size so that
-  // the yellow/red zones can be guarded.
-  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
-                                                vm_page_size()));
-
   Solaris::libthread_init();

   if (UseNUMA) {

View file

@@ -292,10 +292,6 @@ class Solaris {
   static jint _os_thread_limit;
   static volatile jint _os_thread_count;

-  // Minimum stack size a thread can be created with (allowing
-  // the VM to completely create the thread and enter user code)
-  static size_t min_stack_allowed;
-
   // Stack overflow handling

View file

@@ -2504,13 +2504,15 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
   // It write enables the page immediately after protecting it
   // so just return.
   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
-    JavaThread* thread = (JavaThread*) t;
-    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
-    address addr = (address) exceptionRecord->ExceptionInformation[1];
-    if (os::is_memory_serialize_page(thread, addr)) {
-      // Block current thread until the memory serialize page permission restored.
-      os::block_on_serialize_page_trap();
-      return EXCEPTION_CONTINUE_EXECUTION;
+    if (t != NULL && t->is_Java_thread()) {
+      JavaThread* thread = (JavaThread*) t;
+      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
+      address addr = (address) exceptionRecord->ExceptionInformation[1];
+      if (os::is_memory_serialize_page(thread, addr)) {
+        // Block current thread until the memory serialize page permission restored.
+        os::block_on_serialize_page_trap();
+        return EXCEPTION_CONTINUE_EXECUTION;
+      }
     }
   }
@@ -2564,7 +2566,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
   }
 #endif
   if (thread->stack_guards_enabled()) {
-    if (_thread_in_Java) {
+    if (in_java) {
       frame fr;
       PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
       address addr = (address) exceptionRecord->ExceptionInformation[1];
@@ -2576,6 +2578,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
       // Yellow zone violation.  The o/s has unprotected the first yellow
       // zone page for us.  Note:  must call disable_stack_yellow_zone to
       // update the enabled status, even if the zone contains only one page.
+      assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
       thread->disable_stack_yellow_reserved_zone();
       // If not in java code, return and hope for the best.
       return in_java
@@ -3793,6 +3796,11 @@ void os::win32::initialize_system_info() {
   GlobalMemoryStatusEx(&ms);
   _physical_memory = ms.ullTotalPhys;

+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    // Adjust MaxRAM according to the maximum virtual address space available.
+    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
+  }
+
   OSVERSIONINFOEX oi;
   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
   GetVersionEx((OSVERSIONINFO*)&oi);
@@ -4207,7 +4215,7 @@ jint os::init_2(void) {
   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());

   if (actual_reserve_size < min_stack_allowed) {
-    tty->print_cr("\nThe stack size specified is too small, "
+    tty->print_cr("\nThe Java thread stack size specified is too small. "
                   "Specify at least %dk",
                   min_stack_allowed / K);
     return JNI_ERR;
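Among the Windows changes above, `initialize_system_info` now caps a still-defaulted MaxRAM at the process's total virtual address space, so heap sizing doesn't assume physical RAM the process cannot map. The clamp in isolation (values invented for illustration):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of the MaxRAM adjustment: only when the flag is still at its
// default, clamp it to the total virtual address space (ullTotalVirtual
// in the real code). Both numbers below are made-up examples.
int main() {
  uint64_t max_ram = 128ull * 1024 * 1024 * 1024;       // e.g. default 128 GB
  uint64_t total_virtual = 4ull * 1024 * 1024 * 1024;   // e.g. 32-bit: 4 GB
  bool flag_is_default = true;                          // FLAG_IS_DEFAULT(MaxRAM)
  if (flag_is_default) {
    max_ram = std::min(max_ram, total_virtual);         // MIN2 equivalent
  }
  std::printf("MaxRAM: %llu MB\n", (unsigned long long)(max_ram >> 20));
}
```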

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -33,10 +33,6 @@ define_pd_global(bool, DontYieldALot, false);
 define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 2048);

-// if we set CompilerThreadStackSize to a value different than 0, it will
-// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
-// the stack size for compiler threads will default to VMThreadStackSize, although it
-// is defined to 4M in os::Aix::default_stack_size()!
 define_pd_global(intx, CompilerThreadStackSize, 4096);

 // Allow extra space in DEBUG builds for asserts.

View file

@@ -192,8 +192,10 @@ frame os::current_frame() {
   intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
   // hack.
   frame topframe(csp, (address)0x8);
-  // return sender of current topframe which hopefully has pc != NULL.
-  return os::get_sender_for_C_frame(&topframe);
+  // Return sender of sender of current topframe which hopefully
+  // both have pc != NULL.
+  frame tmp = os::get_sender_for_C_frame(&topframe);
+  return os::get_sender_for_C_frame(&tmp);
 }

 // Utility functions
@@ -533,23 +535,17 @@ void os::Aix::init_thread_fpu_state(void) {
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack

-size_t os::Aix::min_stack_allowed = 128*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;

 // return default stack size for thr_type
-size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
-  // Notice that the setting for compiler threads here have no impact
-  // because of the strange 'fallback logic' in os::create_thread().
-  // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
-  // specify a different stack size for compiler threads!
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;
 }

-size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
-  return 2 * page_size();
-}
-
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,11 @@
 //
 define_pd_global(bool, DontYieldALot, false);
 #ifdef AMD64
+define_pd_global(intx, CompilerThreadStackSize, 1024);
 define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 1024);
 #else
+define_pd_global(intx, CompilerThreadStackSize, 512);
 // ThreadStackSize 320 allows a couple of test cases to run while
 // keeping the number of threads that can be created high. System
 // default ThreadStackSize appears to be 512 which is too big.
@@ -41,7 +43,6 @@ define_pd_global(intx, ThreadStackSize, 320);
 define_pd_global(intx, VMThreadStackSize, 512);
 #endif // AMD64
-define_pd_global(intx, CompilerThreadStackSize, 0);
 define_pd_global(size_t, JVMInvokeMethodSlack, 8192);

View file

@@ -838,9 +838,13 @@ bool os::is_allocatable(size_t bytes) {
 // thread stack
 #ifdef AMD64
-size_t os::Bsd::min_stack_allowed = 64 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
 #else
-size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
+size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
 #ifdef __GNUC__
 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
@@ -849,7 +853,7 @@ size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
 #endif // AMD64
 // return default stack size for thr_type
-size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
 #ifdef AMD64
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
@@ -859,11 +863,6 @@ size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
   return s;
 }
-size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
-  // Creating guard page is very expensive. Java thread has HotSpot
-  // guard page, only enable glibc guard page for non-Java threads.
-  return (thr_type == java_thread ? 0 : page_size());
-}
 // Java thread:
 //

View file

@@ -282,9 +282,11 @@ bool os::is_allocatable(size_t bytes) {
 ///////////////////////////////////////////////////////////////////////////////
 // thread stack
-size_t os::Bsd::min_stack_allowed = 64 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
-size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
 #ifdef _LP64
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
 #else
@@ -293,12 +295,6 @@ size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
   return s;
 }
-size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
-  // Only enable glibc guard pages for non-Java threads
-  // (Java threads have HotSpot guard pages)
-  return (thr_type == java_thread ? 0 : page_size());
-}
 static void current_stack_region(address *bottom, size_t *size) {
   address stack_bottom;
   address stack_top;

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -33,7 +33,7 @@ define_pd_global(bool, DontYieldALot, false);
 define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 2048);
-define_pd_global(intx, CompilerThreadStackSize, 0);
+define_pd_global(intx, CompilerThreadStackSize, 2048);
 define_pd_global(uintx,JVMInvokeMethodSlack, 8192);

View file

@@ -473,10 +473,12 @@ bool os::is_allocatable(size_t bytes) {
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
-size_t os::Linux::min_stack_allowed = 64 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
 // return default stack size for thr_type
-size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;
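
This and the neighboring platform files replace the single per-OS min_stack_allowed with three per-thread-type minimums under os::Posix. A hedged sketch of how a caller might pick the right minimum (the enum and helper are illustrative stand-ins, not HotSpot code; only the three _min_stack_allowed names come from the hunks):

    #include <cstddef>

    // Illustrative only: select the minimum stack size for a thread type.
    enum ThreadTypeSketch { java_thread, compiler_thread, vm_internal_thread };

    static size_t min_stack_allowed_for(ThreadTypeSketch t,
                                        size_t compiler_min,  // _compiler_thread_min_stack_allowed
                                        size_t java_min,      // _java_thread_min_stack_allowed
                                        size_t vm_min) {      // _vm_internal_thread_min_stack_allowed
      switch (t) {
        case compiler_thread: return compiler_min;
        case java_thread:     return java_min;
        default:              return vm_min;  // VM-internal threads
      }
    }
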

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -33,10 +33,6 @@ define_pd_global(bool, DontYieldALot, false);
 define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 2048);
-// if we set CompilerThreadStackSize to a value different than 0, it will
-// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
-// the stack size for compiler threads will default to VMThreadStackSize, although it
-// is defined to 4M in os::Linux::default_stack_size()!
 define_pd_global(intx, CompilerThreadStackSize, 4096);
 // Allow extra space in DEBUG builds for asserts.

View file

@@ -205,8 +205,10 @@ frame os::current_frame() {
   intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
   // hack.
   frame topframe(csp, (address)0x8);
-  // return sender of current topframe which hopefully has pc != NULL.
-  return os::get_sender_for_C_frame(&topframe);
+  // Return sender of sender of current topframe which hopefully
+  // both have pc != NULL.
+  frame tmp = os::get_sender_for_C_frame(&topframe);
+  return os::get_sender_for_C_frame(&tmp);
 }
 // Utility functions
@@ -533,15 +535,13 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
-size_t os::Linux::min_stack_allowed = 128*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
 // return default stack size for thr_type
-size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
-  // Notice that the setting for compiler threads here have no impact
-  // because of the strange 'fallback logic' in os::create_thread().
-  // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
-  // specify a different stack size for compiler threads!
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
   return s;
 }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 //
 define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
-define_pd_global(intx, CompilerThreadStackSize, 0);
 // Used on 64 bit platforms for UseCompressedOops base address
 define_pd_global(size_t, HeapBaseMinAddress, CONST64(4)*G);

View file

@@ -726,10 +726,12 @@ bool os::is_allocatable(size_t bytes) {
 ///////////////////////////////////////////////////////////////////////////////
 // thread stack
-size_t os::Linux::min_stack_allowed = 128 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
 // return default stack size for thr_type
-size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,11 @@
 define_pd_global(bool, DontYieldALot, false);
 #ifdef AMD64
+define_pd_global(intx, CompilerThreadStackSize, 1024);
 define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 1024);
 #else
+define_pd_global(intx, CompilerThreadStackSize, 512);
 // ThreadStackSize 320 allows a couple of test cases to run while
 // keeping the number of threads that can be created high. System
 // default ThreadStackSize appears to be 512 which is too big.
@@ -40,8 +42,6 @@ define_pd_global(intx, ThreadStackSize, 320);
 define_pd_global(intx, VMThreadStackSize, 512);
 #endif // AMD64
-define_pd_global(intx, CompilerThreadStackSize, 0);
 define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
 // Used on 64 bit platforms for UseCompressedOops base address

View file

@@ -676,13 +676,17 @@ bool os::is_allocatable(size_t bytes) {
 // thread stack
 #ifdef AMD64
-size_t os::Linux::min_stack_allowed = 64 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
 #else
-size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
+size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
 #endif // AMD64
 // return default stack size for thr_type
-size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
   // default stack size (compiler thread needs larger stack)
 #ifdef AMD64
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);

View file

@@ -307,9 +307,11 @@ bool os::is_allocatable(size_t bytes) {
 ///////////////////////////////////////////////////////////////////////////////
 // thread stack
-size_t os::Linux::min_stack_allowed = 64 * K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
-size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
 #ifdef _LP64
   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
 #else

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 //
 define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
-define_pd_global(intx, CompilerThreadStackSize, 0);
 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64

View file

@@ -84,9 +84,13 @@
 // Minimum stack size for the VM. It's easier to document a constant
 // but it's different for x86 and sparc because the page sizes are different.
 #ifdef _LP64
-size_t os::Solaris::min_stack_allowed = 128*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 128 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
 #else
-size_t os::Solaris::min_stack_allowed = 96*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 96 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 96 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 96 * K;
 #endif
 int os::Solaris::max_register_window_saves_before_flushing() {
@@ -444,7 +448,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
     if (thread->thread_state() == _thread_in_vm) {
-      if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
+      if (sig == SIGBUS && thread->doing_unsafe_access()) {
         stub = SharedRuntime::handle_unsafe_access(thread, npc);
       }
     }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,12 @@
 define_pd_global(bool, DontYieldALot, true); // Determined in the design center
 #ifdef AMD64
+define_pd_global(intx, CompilerThreadStackSize, 1024);
 define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize, 1024);
 define_pd_global(size_t, JVMInvokeMethodSlack, 8*K);
 #else
+define_pd_global(intx, CompilerThreadStackSize, 512);
 // ThreadStackSize 320 allows a couple of test cases to run while
 // keeping the number of threads that can be created high.
 define_pd_global(intx, ThreadStackSize, 320);
@@ -41,7 +43,6 @@ define_pd_global(intx, VMThreadStackSize, 512);
 define_pd_global(size_t, JVMInvokeMethodSlack, 10*K);
 #endif // AMD64
-define_pd_global(intx, CompilerThreadStackSize, 0);
 // Used on 64 bit platforms for UseCompressedOops base address
 define_pd_global(size_t, HeapBaseMinAddress, 2*G);

View file

@@ -86,15 +86,19 @@
 #define MAX_PATH (2 * K)
-// Minimum stack size for the VM. It's easier to document a constant value
+// Minimum stack sizes for the VM. It's easier to document a constant value
 // but it's different for x86 and sparc because the page sizes are different.
 #ifdef AMD64
-size_t os::Solaris::min_stack_allowed = 224*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 394 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 224 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 224 * K;
 #define REG_SP REG_RSP
 #define REG_PC REG_RIP
 #define REG_FP REG_RBP
 #else
-size_t os::Solaris::min_stack_allowed = 64*K;
+size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_java_thread_min_stack_allowed = 64 * K;
+size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
 #define REG_SP UESP
 #define REG_PC EIP
 #define REG_FP EBP

View file

@@ -223,6 +223,9 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
   case vmIntrinsics::_putCharStringU:
 #ifdef TRACE_HAVE_INTRINSICS
   case vmIntrinsics::_counterTime:
+#if defined(_LP64) || !defined(TRACE_ID_CLASS_SHIFT)
+  case vmIntrinsics::_getClassId:
+#endif
 #endif
     break;
   default:

View file

@@ -3092,6 +3092,37 @@ void LIRGenerator::do_IfOp(IfOp* x) {
   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
 }

+#ifdef TRACE_HAVE_INTRINSICS
+void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
+  CodeEmitInfo* info = state_for(x);
+  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
+  assert(info != NULL, "must have info");
+  LIRItem arg(x->argument_at(0), this);
+  arg.load_item();
+
+  LIR_Opr klass = new_register(T_METADATA);
+  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
+  LIR_Opr id = new_register(T_LONG);
+  ByteSize offset = TRACE_KLASS_TRACE_ID_OFFSET;
+  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
+
+  __ move(trace_id_addr, id);
+  __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
+  __ store(id, trace_id_addr);
+
+#ifdef TRACE_ID_META_BITS
+  __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
+#endif
+#ifdef TRACE_ID_CLASS_SHIFT
+  __ unsigned_shift_right(id, TRACE_ID_CLASS_SHIFT, id);
+#endif
+
+  __ move(id, rlock_result(x));
+}
+#endif
+
 void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
   assert(x->number_of_arguments() == 0, "wrong type");
   // Enforce computation of _reserved_argument_area_size which is required on some platforms.
@@ -3117,6 +3148,9 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
   }
 #ifdef TRACE_HAVE_INTRINSICS
+  case vmIntrinsics::_getClassId:
+    do_ClassIDIntrinsic(x);
+    break;
   case vmIntrinsics::_counterTime:
     do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), x);
     break;
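
The LIR emitted by do_ClassIDIntrinsic above tags the klass's trace id as in-use and derives the value the intrinsic returns. A plain scalar sketch of the equivalent logic (the struct and function are invented stand-ins; only the 0x01 tag, the meta-bit mask, and the class shift come from the hunk):

    #include <stdint.h>

    struct KlassSketch { int64_t trace_id; };  // stand-in for the real Klass

    int64_t class_id_sketch(KlassSketch* k, int64_t meta_bits, int class_shift) {
      int64_t id = k->trace_id;
      id |= 0x01;                // logical_or: mark the class as used by tracing
      k->trace_id = id;          // store the tagged id back
      id &= ~meta_bits;          // logical_and: strip TRACE_ID_META_BITS, when defined
      uint64_t uid = (uint64_t) id;
      uid >>= class_shift;       // unsigned_shift_right: TRACE_ID_CLASS_SHIFT, when defined
      return (int64_t) uid;
    }
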

View file

@@ -439,6 +439,10 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
   SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
   void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);

+#ifdef TRACE_HAVE_INTRINSICS
+  void do_ClassIDIntrinsic(Intrinsic* x);
+#endif
+
   void do_RuntimeCall(address routine, Intrinsic* x);
   ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,

View file

@@ -29,7 +29,6 @@
 #include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -577,9 +576,7 @@ class CompileReplay : public StackObj {
     Method* method = parse_method(CHECK);
     if (had_error()) return;
     /* just copied from Method, to build interpret data*/
-    if (ReferencePendingListLocker::is_locked_by_self()) {
-      return;
-    }
     // To be properly initialized, some profiling in the MDO needs the
     // method to be rewritten (number of arguments at a call for
     // instance)

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,103 +205,3 @@ juint AltHashing::murmur3_32(juint seed, const int* data, int len) {
 juint AltHashing::murmur3_32(const int* data, int len) {
   return murmur3_32(0, data, len);
 }
-
-#ifndef PRODUCT
-// Overloaded versions for internal test.
-juint AltHashing::murmur3_32(const jbyte* data, int len) {
-  return murmur3_32(0, data, len);
-}
-
-juint AltHashing::murmur3_32(const jchar* data, int len) {
-  return murmur3_32(0, data, len);
-}
-
-// Internal test for alternate hashing.  Translated from JDK version
-// test/sun/misc/Hashing.java
-static const jbyte ONE_BYTE[] = { (jbyte) 0x80};
-static const jbyte TWO_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81};
-static const jchar ONE_CHAR[] = { (jchar) 0x8180};
-static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
-static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
-static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
-static const jint ONE_INT[] = { (jint)0x83828180};
-static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
-static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
-static const jbyte EIGHT_BYTE[] = {
-  (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82,
-  (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85,
-  (jbyte) 0x86, (jbyte) 0x87};
-static const jchar FOUR_CHAR[] = {
-  (jchar) 0x8180, (jchar) 0x8382,
-  (jchar) 0x8584, (jchar) 0x8786};
-static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
-static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
-
-void AltHashing::testMurmur3_32_ByteArray() {
-  // printf("testMurmur3_32_ByteArray\n");
-  jbyte vector[256];
-  jbyte hashes[4 * 256];
-
-  for (int i = 0; i < 256; i++) {
-    vector[i] = (jbyte) i;
-  }
-
-  // Hash subranges {}, {0}, {0,1}, {0,1,2}, ..., {0,...,255}
-  for (int i = 0; i < 256; i++) {
-    juint hash = murmur3_32(256 - i, vector, i);
-    hashes[i * 4] = (jbyte) hash;
-    hashes[i * 4 + 1] = (jbyte)(hash >> 8);
-    hashes[i * 4 + 2] = (jbyte)(hash >> 16);
-    hashes[i * 4 + 3] = (jbyte)(hash >> 24);
-  }
-
-  // hash to get const result.
-  juint final_hash = murmur3_32(hashes, 4*256);
-
-  assert (MURMUR3_32_X86_CHECK_VALUE == final_hash,
-          "Calculated hash result not as expected. Expected %08X got %08X\n",
-          MURMUR3_32_X86_CHECK_VALUE,
-          final_hash);
-}
-
-void AltHashing::testEquivalentHashes() {
-  juint jbytes, jchars, ints;
-
-  // printf("testEquivalentHashes\n");
-  jbytes = murmur3_32(TWO_BYTE, 2);
-  jchars = murmur3_32(ONE_CHAR, 1);
-  assert (jbytes == jchars,
-          "Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
-
-  jbytes = murmur3_32(FOUR_BYTE, 4);
-  jchars = murmur3_32(TWO_CHAR, 2);
-  ints = murmur3_32(ONE_INT, 1);
-  assert ((jbytes == jchars) && (jbytes == ints),
-          "Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
-
-  jbytes = murmur3_32(SIX_BYTE, 6);
-  jchars = murmur3_32(THREE_CHAR, 3);
-  assert (jbytes == jchars,
-          "Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);
-
-  jbytes = murmur3_32(EIGHT_BYTE, 8);
-  jchars = murmur3_32(FOUR_CHAR, 4);
-  ints = murmur3_32(TWO_INT, 2);
-  assert ((jbytes == jchars) && (jbytes == ints),
-          "Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
-}
-
-// Returns true if the alternate hashcode is correct
-void AltHashing::test_alt_hash() {
-  testMurmur3_32_ByteArray();
-  testEquivalentHashes();
-}
-
-void AltHashing_test() {
-  AltHashing::test_alt_hash();
-}
-#endif // PRODUCT

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,26 +37,18 @@
  */
 class AltHashing : AllStatic {
+  friend class AltHashingTest;

   // utility function copied from java/lang/Integer
   static juint Integer_rotateLeft(juint i, int distance) {
-    return (i << distance) | (i >> (32-distance));
+    return (i << distance) | (i >> (32 - distance));
   }
   static juint murmur3_32(const int* data, int len);
   static juint murmur3_32(juint seed, const int* data, int len);
-
-#ifndef PRODUCT
-  // Hashing functions used for internal testing
-  static juint murmur3_32(const jbyte* data, int len);
-  static juint murmur3_32(const jchar* data, int len);
-  static void testMurmur3_32_ByteArray();
-  static void testEquivalentHashes();
-#endif // PRODUCT
-
  public:
   static juint compute_seed();
   static juint murmur3_32(juint seed, const jbyte* data, int len);
   static juint murmur3_32(juint seed, const jchar* data, int len);
-  NOT_PRODUCT(static void test_alt_hash();)
 };
 #endif // SHARE_VM_CLASSFILE_ALTHASHING_HPP
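
The deleted in-VM self-test and the new friend class AltHashingTest suggest these checks moved to HotSpot's native GTest suite; a minimal sketch of what such a test might look like (the include path and fixture body are assumptions; only AltHashing::murmur3_32 and the friend name come from the diff):

    #include "classfile/altHashing.hpp"
    #include "unittest.hpp"   // assumed HotSpot gtest harness header

    // The friend declaration above admits this fixture to AltHashing's
    // private overloads; a static forwarder keeps the test body readable.
    class AltHashingTest : public ::testing::Test {
     public:
      static juint int_hash(juint seed, const int* data, int len) {
        return AltHashing::murmur3_32(seed, data, len);
      }
    };

    TEST_F(AltHashingTest, same_input_same_hash) {
      const int one_int[] = { (int) 0x83828180 };
      // murmur3_32 is a pure function: equal seed and input give equal output.
      ASSERT_EQ(int_hash(0, one_int, 1), int_hash(0, one_int, 1));
    }
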

View file

@@ -95,7 +95,6 @@
 #define JAVA_6_VERSION 50
 // Used for backward compatibility reasons:
-// - to check NameAndType_info signatures more aggressively
 // - to disallow argument and require ACC_STATIC for <clinit> methods
 #define JAVA_7_VERSION 51
@@ -564,7 +563,7 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
       break;
     }
     case JVM_CONSTANT_NameAndType: {
-      if (_need_verify && _major_version >= JAVA_7_VERSION) {
+      if (_need_verify) {
        const int sig_index = cp->signature_ref_index_at(index);
        const int name_index = cp->name_ref_index_at(index);
        const Symbol* const name = cp->symbol_at(name_index);
@@ -572,9 +571,17 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
        guarantee_property(sig->utf8_length() != 0,
          "Illegal zero length constant pool entry at %d in class %s",
          sig_index, CHECK);
+       guarantee_property(name->utf8_length() != 0,
+         "Illegal zero length constant pool entry at %d in class %s",
+         name_index, CHECK);
+
        if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
+         // Format check method name and signature
+         verify_legal_method_name(name, CHECK);
          verify_legal_method_signature(name, sig, CHECK);
        } else {
+         // Format check field name and signature
+         verify_legal_field_name(name, CHECK);
          verify_legal_field_signature(name, sig, CHECK);
        }
      }
@@ -595,42 +602,32 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
        const Symbol* const name = cp->symbol_at(name_ref_index);
        const Symbol* const signature = cp->symbol_at(signature_ref_index);
        if (tag == JVM_CONSTANT_Fieldref) {
-         verify_legal_field_name(name, CHECK);
-         if (_need_verify && _major_version >= JAVA_7_VERSION) {
-           // Signature is verified above, when iterating NameAndType_info.
-           // Need only to be sure it's non-zero length and the right type.
+         if (_need_verify) {
+           // Field name and signature are verified above, when iterating NameAndType_info.
+           // Need only to be sure signature is non-zero length and the right type.
            if (signature->utf8_length() == 0 ||
                signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
-             throwIllegalSignature(
-                 "Field", name, signature, CHECK);
+             throwIllegalSignature("Field", name, signature, CHECK);
            }
-         } else {
-           verify_legal_field_signature(name, signature, CHECK);
          }
        } else {
-         verify_legal_method_name(name, CHECK);
-         if (_need_verify && _major_version >= JAVA_7_VERSION) {
-           // Signature is verified above, when iterating NameAndType_info.
-           // Need only to be sure it's non-zero length and the right type.
+         if (_need_verify) {
+           // Method name and signature are verified above, when iterating NameAndType_info.
+           // Need only to be sure signature is non-zero length and the right type.
            if (signature->utf8_length() == 0 ||
                signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
-             throwIllegalSignature(
-                 "Method", name, signature, CHECK);
+             throwIllegalSignature("Method", name, signature, CHECK);
            }
-         } else {
-           verify_legal_method_signature(name, signature, CHECK);
          }
-         if (tag == JVM_CONSTANT_Methodref) {
-           // 4509014: If a class method name begins with '<', it must be "<init>".
-           assert(name != NULL, "method name in constant pool is null");
-           const unsigned int name_len = name->utf8_length();
-           if (name_len != 0 && name->byte_at(0) == '<') {
-             if (name != vmSymbols::object_initializer_name()) {
-               classfile_parse_error(
-                 "Bad method name at constant pool index %u in class file %s",
-                 name_ref_index, CHECK);
-             }
-           }
-         }
+         // 4509014: If a class method name begins with '<', it must be "<init>"
+         const unsigned int name_len = name->utf8_length();
+         if (tag == JVM_CONSTANT_Methodref &&
+             name_len != 0 &&
+             name->byte_at(0) == '<' &&
+             name != vmSymbols::object_initializer_name()) {
+           classfile_parse_error(
+             "Bad method name at constant pool index %u in class file %s",
+             name_ref_index, CHECK);
+         }
        }
      }
      break;
@@ -4843,19 +4840,28 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
        }
      }
      else {
-       // 4900761: For class version > 48, any unicode is allowed in class name.
+       // Skip leading 'L' and ignore first appearance of ';'
        length--;
        signature++;
-       while (length > 0 && signature[0] != ';') {
-         if (signature[0] == '.') {
-           classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
-         }
-         length--;
-         signature++;
-       }
-       if (signature[0] == ';') { return signature + 1; }
+       char* c = strchr((char*) signature, ';');
+       // Format check signature
+       if (c != NULL) {
+         ResourceMark rm(THREAD);
+         int newlen = c - (char*) signature;
+         char* sig = NEW_RESOURCE_ARRAY(char, newlen + 1);
+         strncpy(sig, signature, newlen);
+         sig[newlen] = '\0';
+
+         bool legal = verify_unqualified_name(sig, newlen, LegalClass);
+         if (!legal) {
+           classfile_parse_error("Class name contains illegal character "
+                                 "in descriptor in class file %s",
+                                 CHECK_0);
+           return NULL;
+         }
+         return signature + newlen + 1;
+       }
      }
      return NULL;
    }
  case JVM_SIGNATURE_ARRAY:
@@ -4869,7 +4875,6 @@ const char* ClassFileParser::skip_over_field_signature(const char* signature,
    length--;
    void_ok = false;
    break;
-
  default:
    return NULL;
  }
@@ -5402,6 +5407,59 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
  debug_only(ik->verify();)
 }

+// For an anonymous class that is in the unnamed package, move it to its host class's
+// package by prepending its host class's package name to its class name and setting
+// its _class_name field.
+void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
+  ResourceMark rm(THREAD);
+  assert(strrchr(_class_name->as_C_string(), '/') == NULL,
+         "Anonymous class should not be in a package");
+  const char* host_pkg_name =
+    ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);
+
+  if (host_pkg_name != NULL) {
+    size_t host_pkg_len = strlen(host_pkg_name);
+    int class_name_len = _class_name->utf8_length();
+    char* new_anon_name =
+      NEW_RESOURCE_ARRAY(char, host_pkg_len + 1 + class_name_len);
+    // Copy host package name and trailing /.
+    strncpy(new_anon_name, host_pkg_name, host_pkg_len);
+    new_anon_name[host_pkg_len] = '/';
+    // Append anonymous class name. The anonymous class name can contain odd
+    // characters. So, do a strncpy instead of using sprintf("%s...").
+    strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
+
+    // Create a symbol and update the anonymous class name.
+    _class_name = SymbolTable::new_symbol(new_anon_name,
+                                          (int)host_pkg_len + 1 + class_name_len,
+                                          CHECK);
+  }
+}
+
+// If the host class and the anonymous class are in the same package then do
+// nothing. If the anonymous class is in the unnamed package then move it to its
+// host's package. If the classes are in different packages then throw an IAE
+// exception.
+void ClassFileParser::fix_anonymous_class_name(TRAPS) {
+  assert(_host_klass != NULL, "Expected an anonymous class");
+
+  const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
+                                               _class_name->utf8_length(), '/');
+  if (anon_last_slash == NULL) {  // Unnamed package
+    prepend_host_package_name(_host_klass, CHECK);
+  } else {
+    if (!InstanceKlass::is_same_class_package(_host_klass->class_loader(),
+                                              _host_klass->name(),
+                                              _host_klass->class_loader(),
+                                              _class_name)) {
+      ResourceMark rm(THREAD);
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("Host class %s and anonymous class %s are in different packages",
+        _host_klass->name()->as_C_string(), _class_name->as_C_string()));
+    }
+  }
+}
+
 static bool relax_format_check_for(ClassLoaderData* loader_data) {
  bool trusted = (loader_data->is_the_null_class_loader_data() ||
                  SystemDictionary::is_platform_class_loader(loader_data->class_loader()));
@@ -5417,7 +5475,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
                                 Symbol* name,
                                 ClassLoaderData* loader_data,
                                 Handle protection_domain,
-                                const Klass* host_klass,
+                                const InstanceKlass* host_klass,
                                 GrowableArray<Handle>* cp_patches,
                                 Publicity pub_level,
                                 TRAPS) :
@@ -5692,6 +5750,13 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
    return;
  }

+  // if this is an anonymous class fix up its name if it's in the unnamed
+  // package. Otherwise, throw IAE if it is in a different package than
+  // its host class.
+  if (_host_klass != NULL) {
+    fix_anonymous_class_name(CHECK);
+  }
+
  // Verification prevents us from creating names with dots in them, this
  // asserts that that's the case.
  assert(is_internal_format(_class_name), "external class name format used internally");
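
The rewritten skip_over_field_signature above slices the class name out of an "L...;" descriptor with strchr and format-checks it as a whole, instead of scanning for '.' character by character. A self-contained sketch of that slicing step (looks_legal stands in for verify_unqualified_name; everything else is plain C and illustrative):

    #include <string.h>
    #include <stdio.h>

    // Stand-in for verify_unqualified_name(..., LegalClass).
    static int looks_legal(const char* name, int len) {
      for (int i = 0; i < len; i++) {
        if (name[i] == '.' || name[i] == ';' || name[i] == '[') return 0;
      }
      return len > 0;
    }

    // Returns the rest of the signature after "Lpkg/Name;", or NULL if illegal.
    static const char* skip_class_descriptor(const char* sig) {
      if (sig[0] != 'L') return NULL;
      sig++;                              // skip leading 'L'
      const char* c = strchr(sig, ';');   // first ';' terminates the class name
      if (c == NULL) return NULL;
      int newlen = (int)(c - sig);
      char buf[256];
      if (newlen <= 0 || newlen >= (int) sizeof(buf)) return NULL;
      memcpy(buf, sig, newlen);
      buf[newlen] = '\0';
      if (!looks_legal(buf, newlen)) return NULL;
      return sig + newlen + 1;            // rest of the signature
    }

    int main(void) {
      printf("%s\n", skip_class_descriptor("Ljava/lang/String;") ? "legal" : "illegal");
      printf("%s\n", skip_class_descriptor("Ljava.lang.String;") ? "legal" : "illegal");
      return 0;
    }
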

View file

@@ -79,7 +79,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
  const Symbol* _requested_name;
  Symbol* _class_name;
  mutable ClassLoaderData* _loader_data;
-  const Klass* _host_klass;
+  const InstanceKlass* _host_klass;
  GrowableArray<Handle>* _cp_patches; // overrides for CP entries

  // Metadata created before the instance klass is created. Must be deallocated
@@ -155,6 +155,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
                                ConstantPool* cp,
                                TRAPS);

+  void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
+  void fix_anonymous_class_name(TRAPS);
+
  void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
  void set_klass(InstanceKlass* instance);
@@ -474,7 +477,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
                  Symbol* name,
                  ClassLoaderData* loader_data,
                  Handle protection_domain,
-                  const Klass* host_klass,
+                  const InstanceKlass* host_klass,
                  GrowableArray<Handle>* cp_patches,
                  Publicity pub_level,
                  TRAPS);
@@ -500,7 +503,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
  bool is_anonymous() const { return _host_klass != NULL; }
  bool is_interface() const { return _access_flags.is_interface(); }

-  const Klass* host_klass() const { return _host_klass; }
+  const InstanceKlass* host_klass() const { return _host_klass; }
  const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
  ClassLoaderData* loader_data() const { return _loader_data; }
  const Symbol* class_name() const { return _class_name; }

View file

@@ -1358,7 +1358,7 @@ ClassFileStream* ClassLoader::search_module_entries(const GrowableArray<ModuleCl
  if (!Universe::is_module_initialized() &&
      !ModuleEntryTable::javabase_defined() &&
      mod_entry == NULL) {
-    mod_entry = ModuleEntryTable::javabase_module();
+    mod_entry = ModuleEntryTable::javabase_moduleEntry();
  }

  // The module must be a named module
@@ -1708,7 +1708,7 @@ void ClassLoader::create_javabase() {
    if (jb_module == NULL) {
      vm_exit_during_initialization("Unable to create ModuleEntry for java.base");
    }
-    ModuleEntryTable::set_javabase_module(jb_module);
+    ModuleEntryTable::set_javabase_moduleEntry(jb_module);
  }
 }

View file

@@ -50,12 +50,14 @@ class ClassFileStream;
 class ClassPathEntry : public CHeapObj<mtClass> {
 private:
-  ClassPathEntry* _next;
+  ClassPathEntry* volatile _next;
 public:
  // Next entry in class path
-  ClassPathEntry* next() const { return _next; }
+  ClassPathEntry* next() const {
+    return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
+  }
  void set_next(ClassPathEntry* next) {
-    // may have unlocked readers, so write atomically.
+    // may have unlocked readers, so ensure visibility.
    OrderAccess::release_store_ptr(&_next, next);
  }
  virtual bool is_jrt() = 0;
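
The next()/set_next() pair above is a release-store/acquire-load publication: a writer links a fully constructed entry, and lock-free readers observe either the old list or the complete new entry, never a half-built one. The same pattern in portable C++11, for illustration (all names are invented; single writer assumed):

    #include <atomic>

    struct EntrySketch {
      int payload;          // initialized before publication
      EntrySketch* next;
    };

    // Writer: the release store makes all prior writes to 'e' visible to
    // any thread that later acquire-loads the head pointer.
    void publish(std::atomic<EntrySketch*>& head, EntrySketch* e) {
      e->payload = 42;
      e->next = head.load(std::memory_order_relaxed);  // single writer
      head.store(e, std::memory_order_release);
    }

    // Reader: pairs with the release store, like load_ptr_acquire above.
    EntrySketch* first(const std::atomic<EntrySketch*>& head) {
      return head.load(std::memory_order_acquire);
    }
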

View file

@@ -966,7 +966,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
  // Klasses to delete.
  bool walk_all_metadata = clean_previous_versions &&
                           JvmtiExport::has_redefined_a_class() &&
-                           InstanceKlass::has_previous_versions();
+                           InstanceKlass::has_previous_versions_and_reset();
  MetadataOnStackMark md_on_stack(walk_all_metadata);

  // Save previous _unloading pointer for CMS which may add to unloading list before

View file

@@ -773,6 +773,41 @@ void java_lang_Class::initialize_mirror_fields(KlassHandle k,
  InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
 }

+// Set the java.lang.reflect.Module module field in the java_lang_Class mirror
+void java_lang_Class::set_mirror_module_field(KlassHandle k, Handle mirror, Handle module, TRAPS) {
+  if (module.is_null()) {
+    // During startup, the module may be NULL only if java.base has not been defined yet.
+    // Put the class on the fixup_module_list to patch later when the java.lang.reflect.Module
+    // for java.base is known.
+    assert(!Universe::is_module_initialized(), "Incorrect java.lang.reflect.Module pre module system initialization");
+
+    MutexLocker m1(Module_lock, THREAD);
+    // Keep list of classes needing java.base module fixup
+    if (!ModuleEntryTable::javabase_defined()) {
+      if (fixup_module_field_list() == NULL) {
+        GrowableArray<Klass*>* list =
+          new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true);
+        set_fixup_module_field_list(list);
+      }
+      k->class_loader_data()->inc_keep_alive();
+      fixup_module_field_list()->push(k());
+    } else {
+      // java.base was defined at some point between calling create_mirror()
+      // and obtaining the Module_lock, patch this particular class with java.base.
+      ModuleEntry *javabase_entry = ModuleEntryTable::javabase_moduleEntry();
+      assert(javabase_entry != NULL && javabase_entry->module() != NULL,
+             "Setting class module field, java.base should be defined");
+      Handle javabase_handle(THREAD, JNIHandles::resolve(javabase_entry->module()));
+      set_module(mirror(), javabase_handle());
+    }
+  } else {
+    assert(Universe::is_module_initialized() ||
+           (ModuleEntryTable::javabase_defined() &&
+            (module() == JNIHandles::resolve(ModuleEntryTable::javabase_moduleEntry()->module()))),
+           "Incorrect java.lang.reflect.Module specification while creating mirror");
+    set_module(mirror(), module());
+  }
+}
+
 void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader,
                                    Handle module, Handle protection_domain, TRAPS) {
  assert(k->java_mirror() == NULL, "should only assign mirror once");
@@ -835,25 +870,13 @@ void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader,
    set_class_loader(mirror(), class_loader());

    // set the module field in the java_lang_Class instance
-    // This may be null during bootstrap but will get fixed up later on.
-    set_module(mirror(), module());
+    set_mirror_module_field(k, mirror, module, THREAD);

    // Setup indirection from klass->mirror last
    // after any exceptions can happen during allocations.
    if (!k.is_null()) {
      k->set_java_mirror(mirror());
    }
-
-    // Keep list of classes needing java.base module fixup.
-    if (!ModuleEntryTable::javabase_defined()) {
-      if (fixup_module_field_list() == NULL) {
-        GrowableArray<Klass*>* list =
-          new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true);
-        set_fixup_module_field_list(list);
-      }
-      k->class_loader_data()->inc_keep_alive();
-      fixup_module_field_list()->push(k());
-    }
  } else {
    if (fixup_mirror_list() == NULL) {
      GrowableArray<Klass*>* list =
@@ -3015,41 +3038,6 @@ void java_lang_boxing_object::print(BasicType type, jvalue* value, outputStream*
  }
 }

-// Support for java_lang_ref_Reference
-
-HeapWord *java_lang_ref_Reference::pending_list_lock_addr() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_lock_offset);
-  return (HeapWord*) addr;
-}
-
-oop java_lang_ref_Reference::pending_list_lock() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_lock_offset);
-  if (UseCompressedOops) {
-    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
-  } else {
-    return oopDesc::load_decode_heap_oop((oop*)addr);
-  }
-}
-
-HeapWord *java_lang_ref_Reference::pending_list_addr() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_pending_offset);
-  // XXX This might not be HeapWord aligned, almost rather be char *.
-  return (HeapWord*)addr;
-}
-
-oop java_lang_ref_Reference::pending_list() {
-  char *addr = (char *)pending_list_addr();
-  if (UseCompressedOops) {
-    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
-  } else {
-    return oopDesc::load_decode_heap_oop((oop*)addr);
-  }
-}
-
 // Support for java_lang_ref_SoftReference

 jlong java_lang_ref_SoftReference::timestamp(oop ref) {
@@ -3616,8 +3604,6 @@ int java_lang_ref_Reference::referent_offset;
 int java_lang_ref_Reference::queue_offset;
 int java_lang_ref_Reference::next_offset;
 int java_lang_ref_Reference::discovered_offset;
-int java_lang_ref_Reference::static_lock_offset;
-int java_lang_ref_Reference::static_pending_offset;
 int java_lang_ref_Reference::number_of_fake_oop_fields;
 int java_lang_ref_SoftReference::timestamp_offset;
 int java_lang_ref_SoftReference::static_clock_offset;
@@ -3772,8 +3758,6 @@ void JavaClasses::compute_hard_coded_offsets() {
  java_lang_ref_Reference::queue_offset = java_lang_ref_Reference::hc_queue_offset * x + header;
  java_lang_ref_Reference::next_offset = java_lang_ref_Reference::hc_next_offset * x + header;
  java_lang_ref_Reference::discovered_offset = java_lang_ref_Reference::hc_discovered_offset * x + header;
-  java_lang_ref_Reference::static_lock_offset = java_lang_ref_Reference::hc_static_lock_offset * x;
-  java_lang_ref_Reference::static_pending_offset = java_lang_ref_Reference::hc_static_pending_offset * x;
  // Artificial fields for java_lang_ref_Reference
  // The first field is for the discovered field added in 1.4
  java_lang_ref_Reference::number_of_fake_oop_fields = 1;
@@ -4006,8 +3990,6 @@ void JavaClasses::check_offsets() {
  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, next, "Ljava/lang/ref/Reference;");
  // Fake field
  //CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, lock, "Ljava/lang/ref/Reference$Lock;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, pending, "Ljava/lang/ref/Reference;");

  // java.lang.ref.SoftReference
View file

@ -219,6 +219,7 @@ class java_lang_Class : AllStatic {
static void set_class_loader(oop java_class, oop class_loader); static void set_class_loader(oop java_class, oop class_loader);
static void set_component_mirror(oop java_class, oop comp_mirror); static void set_component_mirror(oop java_class, oop comp_mirror);
static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS); static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
static void set_mirror_module_field(KlassHandle K, Handle mirror, Handle module, TRAPS);
public: public:
static void compute_offsets(); static void compute_offsets();
@ -886,17 +887,11 @@ class java_lang_ref_Reference: AllStatic {
hc_next_offset = 2, hc_next_offset = 2,
hc_discovered_offset = 3 // Is not last, see SoftRefs. hc_discovered_offset = 3 // Is not last, see SoftRefs.
}; };
enum {
hc_static_lock_offset = 0,
hc_static_pending_offset = 1
};
static int referent_offset; static int referent_offset;
static int queue_offset; static int queue_offset;
static int next_offset; static int next_offset;
static int discovered_offset; static int discovered_offset;
static int static_lock_offset;
static int static_pending_offset;
static int number_of_fake_oop_fields; static int number_of_fake_oop_fields;
// Accessors // Accessors
@ -912,13 +907,6 @@ class java_lang_ref_Reference: AllStatic {
static inline void set_discovered(oop ref, oop value); static inline void set_discovered(oop ref, oop value);
static inline void set_discovered_raw(oop ref, oop value); static inline void set_discovered_raw(oop ref, oop value);
static inline HeapWord* discovered_addr(oop ref); static inline HeapWord* discovered_addr(oop ref);
// Accessors for statics
static oop pending_list_lock();
static oop pending_list();
static HeapWord* pending_list_lock_addr();
static HeapWord* pending_list_addr();
}; };

View file

@@ -25,12 +25,85 @@
 #include "precompiled.hpp"
 #include "classfile/classFileParser.hpp"
 #include "classfile/classFileStream.hpp"
+#include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "classfile/classLoaderData.inline.hpp"
 #include "classfile/klassFactory.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEnvBase.hpp"
+#include "prims/jvmtiRedefineClasses.hpp"
 #include "trace/traceMacros.hpp"

+// called during initial loading of a shared class
+instanceKlassHandle KlassFactory::check_shared_class_file_load_hook(
+                                          instanceKlassHandle ik,
+                                          Symbol* class_name,
+                                          Handle class_loader,
+                                          Handle protection_domain, TRAPS) {
+#if INCLUDE_CDS && INCLUDE_JVMTI
+  assert(ik.not_null(), "sanity");
+  assert(ik()->is_shared(), "expecting a shared class");
+
+  if (JvmtiExport::should_post_class_file_load_hook()) {
+    assert(THREAD->is_Java_thread(), "must be JavaThread");
+
+    // Post the CFLH
+    JvmtiCachedClassFileData* cached_class_file = NULL;
+    JvmtiCachedClassFileData* archived_class_data = ik->get_archived_class_data();
+    assert(archived_class_data != NULL, "shared class has no archived class data");
+    unsigned char* ptr =
+        VM_RedefineClasses::get_cached_class_file_bytes(archived_class_data);
+    unsigned char* end_ptr =
+        ptr + VM_RedefineClasses::get_cached_class_file_len(archived_class_data);
+    unsigned char* old_ptr = ptr;
+    JvmtiExport::post_class_file_load_hook(class_name,
+                                           class_loader,
+                                           protection_domain,
+                                           &ptr,
+                                           &end_ptr,
+                                           &cached_class_file);
+    if (old_ptr != ptr) {
+      // JVMTI agent has modified class file data.
+      // Set new class file stream using JVMTI agent modified class file data.
+      ClassLoaderData* loader_data =
+        ClassLoaderData::class_loader_data(class_loader());
+      int path_index = ik->shared_classpath_index();
+      SharedClassPathEntry* ent =
+        (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
+      ClassFileStream* stream = new ClassFileStream(ptr,
+                                                    end_ptr - ptr,
+                                                    ent->_name,
+                                                    ClassFileStream::verify);
+      ClassFileParser parser(stream,
+                             class_name,
+                             loader_data,
+                             protection_domain,
+                             NULL,
+                             NULL,
+                             ClassFileParser::BROADCAST, // publicity level
+                             CHECK_NULL);
+      instanceKlassHandle new_ik = parser.create_instance_klass(true /* changed_by_loadhook */,
+                                                                CHECK_NULL);
+      if (cached_class_file != NULL) {
+        new_ik->set_cached_class_file(cached_class_file);
+      }
+      if (class_loader.is_null()) {
+        ResourceMark rm;
+        ClassLoader::add_package(class_name->as_C_string(), path_index, THREAD);
+      }
+      return new_ik;
+    }
+  }
+#endif
+
+  return NULL;
+}
+
 static ClassFileStream* check_class_file_load_hook(ClassFileStream* stream,
                                                    Symbol* name,
                                                    ClassLoaderData* loader_data,

@@ -94,10 +167,9 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
                                                      Symbol* name,
                                                      ClassLoaderData* loader_data,
                                                      Handle protection_domain,
-                                                     const Klass* host_klass,
+                                                     const InstanceKlass* host_klass,
                                                      GrowableArray<Handle>* cp_patches,
                                                      TRAPS) {
   assert(stream != NULL, "invariant");
   assert(loader_data != NULL, "invariant");
   assert(THREAD->is_Java_thread(), "must be a JavaThread");

@@ -142,5 +214,27 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
   TRACE_KLASS_CREATION(result, parser, THREAD);

+#if INCLUDE_CDS && INCLUDE_JVMTI
+  if (DumpSharedSpaces) {
+    assert(cached_class_file == NULL, "Sanity");
+    // Archive the class stream data into the optional data section
+    JvmtiCachedClassFileData *p;
+    int len;
+    const unsigned char *bytes;
+    // event based tracing might set cached_class_file
+    if ((bytes = result->get_cached_class_file_bytes()) != NULL) {
+      len = result->get_cached_class_file_len();
+    } else {
+      len = stream->length();
+      bytes = stream->buffer();
+    }
+    p = (JvmtiCachedClassFileData*)MetaspaceShared::optional_data_space_alloc(
+                    offset_of(JvmtiCachedClassFileData, data) + len);
+    p->length = len;
+    memcpy(p->data, bytes, len);
+    result->set_archived_class_data(p);
+  }
+#endif
+
   return result;
 }

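Editor's note: the DumpSharedSpaces block in the hunk above archives the class file bytes as a fixed header plus a trailing byte payload, sized in one allocation as offset_of(JvmtiCachedClassFileData, data) + len and filled with memcpy. Below is a minimal standalone C++ sketch of that header-plus-payload allocation pattern; the struct and function names are illustrative stand-ins, not HotSpot code.

    // Sketch (illustrative, not HotSpot code): one allocation holds a fixed
    // header plus a variable-length byte payload, sized from the offset of
    // the payload field rather than sizeof the whole struct.
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct CachedClassFileData {
      int length;
      unsigned char data[1];  // payload really extends past the declared array
    };

    CachedClassFileData* archive_bytes(const unsigned char* bytes, int len) {
      void* raw = std::malloc(offsetof(CachedClassFileData, data) + len);
      CachedClassFileData* p = static_cast<CachedClassFileData*>(raw);
      p->length = len;
      std::memcpy(p->data, bytes, len);  // copy the class file stream
      return p;
    }

    int main() {
      const unsigned char magic[] = {0xCA, 0xFE, 0xBA, 0xBE};
      CachedClassFileData* p = archive_bytes(magic, sizeof magic);
      assert(p->length == 4 && p->data[0] == 0xCA);
      std::free(p);
      return 0;
    }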
View file

@@ -72,9 +72,15 @@ class KlassFactory : AllStatic {
                                                 Symbol* name,
                                                 ClassLoaderData* loader_data,
                                                 Handle protection_domain,
-                                                const Klass* host_klass,
+                                                const InstanceKlass* host_klass,
                                                 GrowableArray<Handle>* cp_patches,
                                                 TRAPS);
+ public:
+  static instanceKlassHandle check_shared_class_file_load_hook(
+                                          instanceKlassHandle ik,
+                                          Symbol* class_name,
+                                          Handle class_loader,
+                                          Handle protection_domain, TRAPS);
 };

 #endif // SHARE_VM_CLASSFILE_KLASSFACTORY_HPP

View file

@@ -92,7 +92,7 @@ bool ModuleEntry::can_read(ModuleEntry* m) const {
   // read java.base.  If either of these conditions
   // hold, readability has been established.
   if (!this->is_named() ||
-      (m == ModuleEntryTable::javabase_module())) {
+      (m == ModuleEntryTable::javabase_moduleEntry())) {
     return true;
   }

@@ -358,16 +358,27 @@ void ModuleEntryTable::finalize_javabase(Handle module_handle, Symbol* version,
   }

   // Set java.lang.reflect.Module, version and location for java.base
-  ModuleEntry* jb_module = javabase_module();
+  ModuleEntry* jb_module = javabase_moduleEntry();
   assert(jb_module != NULL, "java.base ModuleEntry not defined");
-  jb_module->set_module(boot_loader_data->add_handle(module_handle));
   jb_module->set_version(version);
   jb_module->set_location(location);
+  // Once java.base's ModuleEntry _module field is set with the known
+  // java.lang.reflect.Module, java.base is considered "defined" to the VM.
+  jb_module->set_module(boot_loader_data->add_handle(module_handle));
+
   // Store pointer to the ModuleEntry for java.base in the java.lang.reflect.Module object.
   java_lang_reflect_Module::set_module_entry(module_handle(), jb_module);
+
+  // Patch any previously loaded classes' module field with java.base's java.lang.reflect.Module.
+  patch_javabase_entries(module_handle);
 }

+// Within java.lang.Class instances there is a java.lang.reflect.Module field
+// that must be set with the defining module.  During startup, prior to java.base's
+// definition, classes needing their module field set are added to the fixup_module_list.
+// Their module field is set once java.base's java.lang.reflect.Module is known to the VM.
 void ModuleEntryTable::patch_javabase_entries(Handle module_handle) {
+  assert(Module_lock->owned_by_self(), "should have the Module_lock");
   if (module_handle.is_null()) {
     fatal("Unable to patch the module field of classes loaded prior to java.base's definition, invalid java.lang.reflect.Module");
   }

@@ -389,9 +400,7 @@ void ModuleEntryTable::patch_javabase_entries(Handle module_handle) {
   for (int i = 0; i < list_length; i++) {
     Klass* k = list->at(i);
     assert(k->is_klass(), "List should only hold classes");
-    Thread* THREAD = Thread::current();
-    KlassHandle kh(THREAD, k);
-    java_lang_Class::fixup_module_field(kh, module_handle);
+    java_lang_Class::fixup_module_field(KlassHandle(k), module_handle);
     k->class_loader_data()->dec_keep_alive();
   }

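Editor's note: the comment block added above describes a deferred fixup: classes created before java.base is defined are queued on a fixup list, and their java.lang.reflect.Module field is patched in one pass once the module object exists. A standalone sketch of that shape, with stand-in types rather than HotSpot's Klass/Handle machinery:

    // Sketch (illustrative, not HotSpot code): queue early-loaded classes,
    // then patch their module field once java.base is finally defined.
    #include <cassert>
    #include <string>
    #include <vector>

    struct Module { std::string name; };

    struct Klass {
      std::string name;
      const Module* module = nullptr;  // unset until java.base is defined
    };

    static std::vector<Klass*> fixup_module_list;  // filled during early bootstrap

    void patch_javabase_entries(const Module& javabase) {
      for (Klass* k : fixup_module_list) {
        k->module = &javabase;  // cf. java_lang_Class::fixup_module_field
      }
      fixup_module_list.clear();
    }

    int main() {
      Klass object{"java/lang/Object"}, string_k{"java/lang/String"};
      fixup_module_list = {&object, &string_k};
      Module javabase{"java.base"};
      patch_javabase_entries(javabase);  // once java.base is "defined"
      assert(object.module == &javabase && string_k.module == &javabase);
      return 0;
    }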
View file

@@ -78,11 +78,11 @@ public:
     _must_walk_reads = false;
   }

   Symbol* name() const { return literal(); }
   void set_name(Symbol* n) { set_literal(n); }

   jobject module() const { return _module; }
   void set_module(jobject j) { _module = j; }

   // The shared ProtectionDomain reference is set once the VM loads a shared class
   // originated from the current Module. The referenced ProtectionDomain object is

@@ -217,13 +217,13 @@ public:
   // Special handling for unnamed module, one per class loader's ModuleEntryTable
   void create_unnamed_module(ClassLoaderData* loader_data);
   ModuleEntry* unnamed_module() { return _unnamed_module; }

   // Special handling for java.base
-  static ModuleEntry* javabase_module() { return _javabase_module; }
-  static void set_javabase_module(ModuleEntry* java_base) { _javabase_module = java_base; }
+  static ModuleEntry* javabase_moduleEntry() { return _javabase_module; }
+  static void set_javabase_moduleEntry(ModuleEntry* java_base) { _javabase_module = java_base; }
   static bool javabase_defined() { return ((_javabase_module != NULL) &&
                                            (_javabase_module->module() != NULL)); }
   static void finalize_javabase(Handle module_handle, Symbol* version, Symbol* location);
   static void patch_javabase_entries(Handle module_handle);

View file

@@ -206,7 +206,7 @@ static void define_javabase_module(jobject module, jstring version,
   assert(pkg_list->length() == 0 || package_table != NULL, "Bad package_table");

   // Ensure java.base's ModuleEntry has been created
-  assert(ModuleEntryTable::javabase_module() != NULL, "No ModuleEntry for java.base");
+  assert(ModuleEntryTable::javabase_moduleEntry() != NULL, "No ModuleEntry for java.base");

   bool duplicate_javabase = false;
   {

@@ -226,7 +226,7 @@ static void define_javabase_module(jobject module, jstring version,
     for (int x = 0; x < pkg_list->length(); x++) {
       // Some of java.base's packages were added early in bootstrapping, ignore duplicates.
       if (package_table->lookup_only(pkg_list->at(x)) == NULL) {
-        pkg = package_table->locked_create_entry_or_null(pkg_list->at(x), ModuleEntryTable::javabase_module());
+        pkg = package_table->locked_create_entry_or_null(pkg_list->at(x), ModuleEntryTable::javabase_moduleEntry());
         assert(pkg != NULL, "Unable to create a java.base package entry");
       }
       // Unable to have a GrowableArray of TempNewSymbol.  Must decrement the refcount of

@@ -255,9 +255,6 @@ static void define_javabase_module(jobject module, jstring version,
     log_trace(modules)("define_javabase_module(): creation of package %s for module java.base",
                        (pkg_list->at(x))->as_C_string());
   }
-
-  // Patch any previously loaded classes' module field with java.base's jlr.Module.
-  ModuleEntryTable::patch_javabase_entries(module_handle);
 }

 void Modules::define_module(jobject module, jstring version,

View file

@@ -1027,7 +1027,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
                                       Handle class_loader,
                                       Handle protection_domain,
                                       ClassFileStream* st,
-                                      const Klass* host_klass,
+                                      const InstanceKlass* host_klass,
                                       GrowableArray<Handle>* cp_patches,
                                       TRAPS) {

@@ -1210,16 +1210,12 @@ Klass* SystemDictionary::find_shared_class(Symbol* class_name) {

 instanceKlassHandle SystemDictionary::load_shared_class(
                  Symbol* class_name, Handle class_loader, TRAPS) {
-  // Don't load shared class when JvmtiExport::should_post_class_file_load_hook()
-  // is enabled since posting CFLH is not supported when loading shared class.
-  if (!JvmtiExport::should_post_class_file_load_hook()) {
-    instanceKlassHandle ik (THREAD, find_shared_class(class_name));
-    // Make sure we only return the boot class for the NULL classloader.
-    if (ik.not_null() &&
-        ik->is_shared_boot_class() && class_loader.is_null()) {
-      Handle protection_domain;
-      return load_shared_class(ik, class_loader, protection_domain, THREAD);
-    }
+  instanceKlassHandle ik (THREAD, find_shared_class(class_name));
+  // Make sure we only return the boot class for the NULL classloader.
+  if (ik.not_null() &&
+      ik->is_shared_boot_class() && class_loader.is_null()) {
+    Handle protection_domain;
+    return load_shared_class(ik, class_loader, protection_domain, THREAD);
   }
   return instanceKlassHandle();
 }

@@ -1303,11 +1299,6 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
                                                         Handle class_loader,
                                                         Handle protection_domain, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-  if (JvmtiExport::should_post_class_file_load_hook()) {
-    // Don't load shared class when JvmtiExport::should_post_class_file_load_hook()
-    // is enabled since posting CFLH is not supported when loading shared class.
-    return nh;
-  }

   if (ik.not_null()) {
     Symbol* class_name = ik->name();

@@ -1358,6 +1349,14 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
       }
     }

+    instanceKlassHandle new_ik = KlassFactory::check_shared_class_file_load_hook(
+        ik, class_name, class_loader, protection_domain, CHECK_(nh));
+    if (new_ik.not_null()) {
+      // The class is changed by CFLH. Return the new class. The shared class is
+      // not used.
+      return new_ik;
+    }
+
     // Adjust methods to recover missing data.  They need addresses for
     // interpreter entry points and their default native method address
     // must be reset.

View file

@@ -299,7 +299,7 @@ public:
                              Handle class_loader,
                              Handle protection_domain,
                              ClassFileStream* st,
-                             const Klass* host_klass,
+                             const InstanceKlass* host_klass,
                              GrowableArray<Handle>* cp_patches,
                              TRAPS);

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -95,7 +95,8 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
     Category2_2nd = (Category2_2ndFlag << 1 * BitsPerByte) | Primitive,

     // Primitive values (type descriminator stored in most-signifcant bytes)
-    Bogus = (ITEM_Bogus << 2 * BitsPerByte) | Category1,
+    // Bogus needs the " | Primitive".  Else, is_reference(Bogus) returns TRUE.
+    Bogus = (ITEM_Bogus << 2 * BitsPerByte) | Primitive,
     Boolean = (ITEM_Boolean << 2 * BitsPerByte) | Category1,
     Byte = (ITEM_Byte << 2 * BitsPerByte) | Category1,
     Short = (ITEM_Short << 2 * BitsPerByte) | Category1,

View file

@@ -67,12 +67,12 @@ static void* volatile _verify_byte_codes_fn = NULL;
 static volatile jint _is_new_verify_byte_codes_fn = (jint) true;

 static void* verify_byte_codes_fn() {
-  if (_verify_byte_codes_fn == NULL) {
+  if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
     void *lib_handle = os::native_java_library();
     void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
     OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
     if (func == NULL) {
-      OrderAccess::release_store(&_is_new_verify_byte_codes_fn, false);
+      _is_new_verify_byte_codes_fn = false;
       func = os::dll_lookup(lib_handle, "VerifyClassCodes");
       OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
     }

@@ -2786,7 +2786,7 @@ void ClassVerifier::verify_invoke_instructions(
       // direct interface relative to the host class
       have_imr_indirect = (have_imr_indirect &&
                            !is_same_or_direct_interface(
-                             InstanceKlass::cast(current_class()->host_klass()),
+                             current_class()->host_klass(),
                              host_klass_type, ref_class_type));
     }
     if (!subtype) {

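Editor's note: the first verifier hunk swaps a plain NULL test for an acquire load, pairing with the release stores that publish the lazily looked-up verifier entry point. A standalone sketch of the same release/acquire publication, using std::atomic in place of HotSpot's OrderAccess; the function names are stand-ins:

    // Sketch (illustrative, not HotSpot code): lazy publication of a function
    // pointer with release/acquire ordering.
    #include <atomic>
    #include <cstdio>

    using verify_fn = int (*)(int);

    static std::atomic<verify_fn> g_verify_fn{nullptr};

    static int real_verify(int x) { return x * 2; }  // stand-in for a dll_lookup result

    verify_fn verify_byte_codes_fn() {
      // Acquire: observing a non-null pointer also makes everything the
      // publisher wrote before its release store visible to this thread.
      verify_fn fn = g_verify_fn.load(std::memory_order_acquire);
      if (fn == nullptr) {
        fn = &real_verify;                                 // e.g. look up the symbol
        g_verify_fn.store(fn, std::memory_order_release);  // publish
      }
      return fn;
    }

    int main() {
      std::printf("%d\n", verify_byte_codes_fn()(21));  // prints 42
      return 0;
    }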
View file

@@ -368,6 +368,7 @@ bool vmIntrinsics::can_trap(vmIntrinsics::ID id) {
     switch(id) {
 #ifdef TRACE_HAVE_INTRINSICS
     case vmIntrinsics::_counterTime:
+    case vmIntrinsics::_getClassId:
 #endif
     case vmIntrinsics::_currentTimeMillis:
     case vmIntrinsics::_nanoTime:

View file

@@ -32,7 +32,6 @@
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/directivesParser.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"

@@ -893,15 +892,6 @@ void CompileBroker::compile_method_base(const methodHandle& method,
     return;
   }

-  // If the requesting thread is holding the pending list lock
-  // then we just return. We can't risk blocking while holding
-  // the pending list lock or a 3-way deadlock may occur
-  // between the reference handler thread, a GC (instigated
-  // by a compiler thread), and compiled method registration.
-  if (ReferencePendingListLocker::is_locked_by_self()) {
-    return;
-  }
-
   if (TieredCompilation) {
     // Tiered policy requires MethodCounters to exist before adding a method to
     // the queue. Create if we don't have them yet.

View file

@@ -3511,6 +3511,7 @@ bool CMSCollector::do_marking_mt() {
                                                   conc_workers()->active_workers(),
                                                   Threads::number_of_non_daemon_threads());
   num_workers = conc_workers()->update_active_workers(num_workers);
+  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());

   CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();

View file

@@ -28,7 +28,6 @@
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"

@@ -77,23 +76,6 @@ void ConcurrentMarkSweepThread::run_service() {
     log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
   }

-  {
-    MutexLockerEx x(CGC_lock, true);
-    set_CMS_flag(CMS_cms_wants_token);
-    assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
-    // Wait until the surrogate locker thread that will do
-    // pending list locking on our behalf has been created.
-    // We cannot start the SLT thread ourselves since we need
-    // to be a JavaThread to do so.
-    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
-    while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) {
-      CGC_lock->wait(true, 200);
-      loopY.tick();
-    }
-    clear_CMS_flag(CMS_cms_wants_token);
-  }
-
   while (!should_terminate()) {
     sleepBeforeNextCycle();
     if (should_terminate()) break;

View file

@@ -899,6 +899,8 @@ void ParNewGeneration::collect(bool full,
                                                workers->active_workers(),
                                                Threads::number_of_non_daemon_threads());
   active_workers = workers->update_active_workers(active_workers);
+  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
+
   _old_gen = gch->old_gen();

   // If the next generation is too full to accommodate worst-case promotion

@@ -1364,22 +1366,25 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
     return false;
   }
   assert(prefix != NULL && prefix != BUSY, "Error");
-  size_t i = 1;
   oop cur = prefix;
-  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
-    i++; cur = cur->list_ptr_from_klass();
+  for (size_t i = 1; i < objsFromOverflow; ++i) {
+    oop next = cur->list_ptr_from_klass();
+    if (next == NULL) break;
+    cur = next;
   }
+  assert(cur != NULL, "Loop postcondition");

   // Reattach remaining (suffix) to overflow list
-  if (cur->klass_or_null() == NULL) {
+  oop suffix = cur->list_ptr_from_klass();
+  if (suffix == NULL) {
     // Write back the NULL in lieu of the BUSY we wrote
     // above and it is still the same value.
     if (_overflow_list == BUSY) {
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
-    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
+    assert(suffix != BUSY, "Error");
+    // suffix will be put back on global list
     cur->set_klass_to_list_ptr(NULL);  // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be

@@ -1399,8 +1404,10 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
       // Too bad, someone else got in in between; we'll need to do a splice.
       // Find the last item of suffix list
       oop last = suffix;
-      while (last->klass_or_null() != NULL) {
-        last = last->list_ptr_from_klass();
+      while (true) {
+        oop next = last->list_ptr_from_klass();
+        if (next == NULL) break;
+        last = next;
       }
       // Atomically prepend suffix to current overflow list
       observed_overflow_list = _overflow_list;

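Editor's note: the rewritten traversal in take_from_overflow_list_work reads the smuggled next pointer once per iteration and stops at NULL, so cur always lands on the last real node before the suffix is split off. A standalone sketch of that loop shape with an ordinary linked list:

    // Sketch (illustrative, not HotSpot code): bounded list walk that loads
    // the next pointer once per step and keeps `cur` on the last node seen.
    #include <cassert>
    #include <cstddef>

    struct Node { Node* next; };

    Node* walk_prefix(Node* prefix, std::size_t max_objs) {
      Node* cur = prefix;
      for (std::size_t i = 1; i < max_objs; ++i) {
        Node* next = cur->next;      // single load per iteration
        if (next == nullptr) break;  // end of list
        cur = next;
      }
      assert(cur != nullptr);  // loop postcondition
      return cur;
    }

    int main() {
      Node c{nullptr}, b{&c}, a{&b};
      assert(walk_prefix(&a, 2) == &b);  // capped by max_objs
      assert(walk_prefix(&a, 8) == &c);  // capped by the list end
      return 0;
    }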
View file

@@ -37,14 +37,6 @@
 //////////////////////////////////////////////////////////
 // Methods in abstract class VM_CMS_Operation
 //////////////////////////////////////////////////////////
-void VM_CMS_Operation::acquire_pending_list_lock() {
-  _pending_list_locker.lock();
-}
-
-void VM_CMS_Operation::release_and_notify_pending_list_lock() {
-  _pending_list_locker.unlock();
-}
-
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {

@@ -85,17 +77,10 @@ bool VM_CMS_Operation::doit_prologue() {
   assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "Possible deadlock");

-  if (needs_pending_list_lock()) {
-    acquire_pending_list_lock();
-  }
-  // Get the Heap_lock after the pending_list_lock.
   Heap_lock->lock();
   if (lost_race()) {
     assert(_prologue_succeeded == false, "Initialized in c'tor");
     Heap_lock->unlock();
-    if (needs_pending_list_lock()) {
-      release_and_notify_pending_list_lock();
-    }
   } else {
     _prologue_succeeded = true;
   }

@@ -108,11 +93,10 @@ void VM_CMS_Operation::doit_epilogue() {
   assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "Possible deadlock");

-  // Release the Heap_lock first.
-  Heap_lock->unlock();
-  if (needs_pending_list_lock()) {
-    release_and_notify_pending_list_lock();
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
   }
+  Heap_lock->unlock();
 }

 //////////////////////////////////////////////////////////

@@ -230,9 +214,11 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
   Thread* thr = Thread::current();
   assert(thr->is_Java_thread(), "just checking");
   JavaThread* jt = (JavaThread*)thr;
-  // Release the Heap_lock first.
+
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();

   // It is fine to test whether completed collections has
   // exceeded our request count without locking because
View file
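Editor's note: with the dedicated pending-list locker gone, the epilogues above notify waiters on Heap_lock while still holding it, then unlock. A standalone sketch of that notify-then-release shape, using a std::condition_variable as a stand-in for Heap_lock; all names here are illustrative:

    // Sketch (illustrative, not HotSpot code): notify the reference handler
    // while the lock is held, then release it, as the new epilogue does.
    #include <cassert>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex heap_lock;                // stand-in for Heap_lock
    std::condition_variable heap_cv;
    bool has_pending_list = false;

    void epilogue_under_lock(std::unique_lock<std::mutex>& held) {
      if (has_pending_list) {
        heap_cv.notify_all();            // cf. Heap_lock->notify_all()
      }
      held.unlock();                     // cf. Heap_lock->unlock()
    }

    int main() {
      bool woken = false;
      std::thread handler([&] {
        std::unique_lock<std::mutex> l(heap_lock);
        heap_cv.wait(l, [&] { return has_pending_list; });
        woken = true;
      });
      {
        std::unique_lock<std::mutex> l(heap_lock);  // the VM op owns the lock
        has_pending_list = true;                    // a GC appended references
        epilogue_under_lock(l);
      }
      handler.join();
      assert(woken);
      return 0;
    }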

@ -28,7 +28,6 @@
#include "gc/cms/concurrentMarkSweepGeneration.hpp" #include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/gcCause.hpp" #include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp" #include "gc/shared/gcId.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp" #include "gc/shared/vmGCOperations.hpp"
#include "runtime/vm_operations.hpp" #include "runtime/vm_operations.hpp"
@ -52,9 +51,6 @@
class CMSCollector; class CMSCollector;
class VM_CMS_Operation: public VM_Operation { class VM_CMS_Operation: public VM_Operation {
private:
ReferencePendingListLocker _pending_list_locker;
protected: protected:
CMSCollector* _collector; // associated collector CMSCollector* _collector; // associated collector
bool _prologue_succeeded; // whether doit_prologue succeeded bool _prologue_succeeded; // whether doit_prologue succeeded
@ -62,10 +58,6 @@ class VM_CMS_Operation: public VM_Operation {
bool lost_race() const; bool lost_race() const;
// java.lang.ref.Reference support
void acquire_pending_list_lock();
void release_and_notify_pending_list_lock();
public: public:
VM_CMS_Operation(CMSCollector* collector): VM_CMS_Operation(CMSCollector* collector):
_collector(collector), _collector(collector),

View file

@@ -175,7 +175,7 @@ void ConcurrentMarkThread::run_service() {
                                      TimeHelper::counter_to_millis(mark_end - mark_start));

         CMCheckpointRootsFinalClosure final_cl(_cm);
-        VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
+        VM_CGC_Operation op(&final_cl, "Pause Remark");
         VMThread::execute(&op);
       }
       if (cm()->restart_for_overflow()) {

@@ -199,7 +199,7 @@ void ConcurrentMarkThread::run_service() {
         delay_to_keep_mmu(g1_policy, false /* cleanup */);

         CMCleanUp cl_cl(_cm);
-        VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
+        VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause

View file

@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "code/nmethod.hpp"
+#include "gc/g1/g1CodeRootSetTable.hpp"
 #include "gc/g1/g1CodeCacheRemSet.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"

@@ -33,58 +34,13 @@
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"

-class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
-  friend class G1CodeRootSetTest;
-  typedef HashtableEntry<nmethod*, mtGC> Entry;
-
-  static CodeRootSetTable* volatile _purge_list;
-
-  CodeRootSetTable* _purge_next;
-
-  unsigned int compute_hash(nmethod* nm) {
-    uintptr_t hash = (uintptr_t)nm;
-    return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
-  }
-
-  void remove_entry(Entry* e, Entry* previous);
-  Entry* new_entry(nmethod* nm);
-
- public:
-  CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
-  ~CodeRootSetTable();
-
-  // Needs to be protected locks
-  bool add(nmethod* nm);
-  bool remove(nmethod* nm);
-
-  // Can be called without locking
-  bool contains(nmethod* nm);
-
-  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
-
-  void copy_to(CodeRootSetTable* new_table);
-  void nmethods_do(CodeBlobClosure* blk);
-
-  template<typename CB>
-  int remove_if(CB& should_remove);
-
-  static void purge_list_append(CodeRootSetTable* tbl);
-  static void purge();
-
-  static size_t static_mem_size() {
-    return sizeof(_purge_list);
-  }
-
-  size_t mem_size();
-};
-
-CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+G1CodeRootSetTable* volatile G1CodeRootSetTable::_purge_list = NULL;

-size_t CodeRootSetTable::mem_size() {
-  return sizeof(CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
+size_t G1CodeRootSetTable::mem_size() {
+  return sizeof(G1CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
 }

-CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
   unsigned int hash = compute_hash(nm);
   Entry* entry = (Entry*) new_entry_free_list();
   if (entry == NULL) {

@@ -96,7 +52,7 @@ G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
   return entry;
 }

-void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
+void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
   int index = hash_to_index(e->hash());
   assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");

@@ -108,7 +64,7 @@ void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
   free_entry(e);
 }

-CodeRootSetTable::~CodeRootSetTable() {
+G1CodeRootSetTable::~G1CodeRootSetTable() {
   for (int index = 0; index < table_size(); ++index) {
     for (Entry* e = bucket(index); e != NULL; ) {
       Entry* to_remove = e;

@@ -125,7 +81,7 @@ G1CodeRootSetTable::~G1CodeRootSetTable() {
   }
 }

-bool CodeRootSetTable::add(nmethod* nm) {
+bool G1CodeRootSetTable::add(nmethod* nm) {
   if (!contains(nm)) {
     Entry* e = new_entry(nm);
     int index = hash_to_index(e->hash());

@@ -135,7 +91,7 @@ bool G1CodeRootSetTable::add(nmethod* nm) {
   return false;
 }

-bool CodeRootSetTable::contains(nmethod* nm) {
+bool G1CodeRootSetTable::contains(nmethod* nm) {
   int index = hash_to_index(compute_hash(nm));
   for (Entry* e = bucket(index); e != NULL; e = e->next()) {
     if (e->literal() == nm) {

@@ -145,7 +101,7 @@ bool G1CodeRootSetTable::contains(nmethod* nm) {
   return false;
 }

-bool CodeRootSetTable::remove(nmethod* nm) {
+bool G1CodeRootSetTable::remove(nmethod* nm) {
   int index = hash_to_index(compute_hash(nm));
   Entry* previous = NULL;
   for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {

@@ -157,7 +113,7 @@ bool G1CodeRootSetTable::remove(nmethod* nm) {
   return false;
 }

-void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
   for (int index = 0; index < table_size(); ++index) {
     for (Entry* e = bucket(index); e != NULL; e = e->next()) {
       new_table->add(e->literal());

@@ -166,7 +122,7 @@ void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
   new_table->copy_freelist(this);
 }

-void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
   for (int index = 0; index < table_size(); ++index) {
     for (Entry* e = bucket(index); e != NULL; e = e->next()) {
       blk->do_code_blob(e->literal());

@@ -175,7 +131,7 @@ void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
 }

 template<typename CB>
-int CodeRootSetTable::remove_if(CB& should_remove) {
+int G1CodeRootSetTable::remove_if(CB& should_remove) {
   int num_removed = 0;
   for (int index = 0; index < table_size(); ++index) {
     Entry* previous = NULL;

@@ -198,52 +154,52 @@ G1CodeRootSet::~G1CodeRootSet() {
   delete _table;
 }

-CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-  return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
+  return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
 }

 void G1CodeRootSet::allocate_small_table() {
-  CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);
+  G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);

   OrderAccess::release_store_ptr(&_table, temp);
 }

-void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
+void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
   for (;;) {
     table->_purge_next = _purge_list;
-    CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
     if (old == table->_purge_next) {
       break;
     }
   }
 }

-void CodeRootSetTable::purge() {
-  CodeRootSetTable* table = _purge_list;
+void G1CodeRootSetTable::purge() {
+  G1CodeRootSetTable* table = _purge_list;
   _purge_list = NULL;
   while (table != NULL) {
-    CodeRootSetTable* to_purge = table;
+    G1CodeRootSetTable* to_purge = table;
     table = table->_purge_next;
     delete to_purge;
   }
 }

 void G1CodeRootSet::move_to_large() {
-  CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
+  G1CodeRootSetTable* temp = new G1CodeRootSetTable(LargeSize);

   _table->copy_to(temp);

-  CodeRootSetTable::purge_list_append(_table);
+  G1CodeRootSetTable::purge_list_append(_table);

   OrderAccess::release_store_ptr(&_table, temp);
 }

 void G1CodeRootSet::purge() {
-  CodeRootSetTable::purge();
+  G1CodeRootSetTable::purge();
 }

 size_t G1CodeRootSet::static_mem_size() {
-  return CodeRootSetTable::static_mem_size();
+  return G1CodeRootSetTable::static_mem_size();
 }

 void G1CodeRootSet::add(nmethod* method) {

@@ -278,7 +234,7 @@ bool G1CodeRootSet::remove(nmethod* method) {
 }

 bool G1CodeRootSet::contains(nmethod* method) {
-  CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
+  G1CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
   if (table != NULL) {
     return table->contains(method);
   }

@@ -348,67 +304,3 @@ void G1CodeRootSet::clean(HeapRegion* owner) {
     clear();
   }
 }
-
-#ifndef PRODUCT
-
-class G1CodeRootSetTest {
- public:
-  static void test() {
-    {
-      G1CodeRootSet set1;
-      assert(set1.is_empty(), "Code root set must be initially empty but is not.");
-
-      assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
-             "The code root set's static memory usage is incorrect, " SIZE_FORMAT " bytes", G1CodeRootSet::static_mem_size());
-
-      set1.add((nmethod*)1);
-      assert(set1.length() == 1, "Added exactly one element, but set contains "
-             SIZE_FORMAT " elements", set1.length());
-
-      const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
-
-      for (size_t i = 1; i <= num_to_add; i++) {
-        set1.add((nmethod*)1);
-      }
-      assert(set1.length() == 1,
-             "Duplicate detection should not have increased the set size but "
-             "is " SIZE_FORMAT, set1.length());
-
-      for (size_t i = 2; i <= num_to_add; i++) {
-        set1.add((nmethod*)(uintptr_t)(i));
-      }
-      assert(set1.length() == num_to_add,
-             "After adding in total " SIZE_FORMAT " distinct code roots, they "
-             "need to be in the set, but there are only " SIZE_FORMAT,
-             num_to_add, set1.length());
-
-      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
-
-      size_t num_popped = 0;
-      for (size_t i = 1; i <= num_to_add; i++) {
-        bool removed = set1.remove((nmethod*)i);
-        if (removed) {
-          num_popped += 1;
-        } else {
-          break;
-        }
-      }
-      assert(num_popped == num_to_add,
-             "Managed to pop " SIZE_FORMAT " code roots, but only " SIZE_FORMAT " "
-             "were added", num_popped, num_to_add);
-      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
-
-      G1CodeRootSet::purge();
-
-      assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
-    }
-
-  }
-};
-
-void TestCodeCacheRemSet_test() {
-  G1CodeRootSetTest::test();
-}
-
-#endif

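Editor's note: purge_list_append in the hunk above pushes a retired table onto a global list with a CAS retry loop instead of a lock. The same lock-free intrusive push, sketched standalone with std::atomic in place of Atomic::cmpxchg_ptr:

    // Sketch (illustrative, not HotSpot code): lock-free intrusive stack push;
    // retry until the head we linked against is still the head when we swing it.
    #include <atomic>
    #include <cassert>

    struct Table {
      Table* purge_next = nullptr;
    };

    static std::atomic<Table*> g_purge_list{nullptr};

    void purge_list_append(Table* table) {
      Table* head = g_purge_list.load(std::memory_order_relaxed);
      do {
        table->purge_next = head;  // link against the head we observed
        // On failure the CAS refreshes `head` and we retry.
      } while (!g_purge_list.compare_exchange_weak(head, table,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));
    }

    int main() {
      Table a, b;
      purge_list_append(&a);
      purge_list_append(&b);
      assert(g_purge_list.load() == &b && b.purge_next == &a);
      return 0;
    }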
View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -28,7 +28,7 @@
 #include "memory/allocation.hpp"

 class CodeBlobClosure;
-class CodeRootSetTable;
+class G1CodeRootSetTable;
 class HeapRegion;
 class nmethod;

@@ -42,8 +42,8 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
   const static size_t Threshold = 24;
   const static size_t LargeSize = 512;

-  CodeRootSetTable* _table;
-  CodeRootSetTable* load_acquire_table();
+  G1CodeRootSetTable* _table;
+  G1CodeRootSetTable* load_acquire_table();

   size_t _length;

View file

@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP
+#define SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP
+
+#include "utilities/hashtable.hpp"
+
+class nmethod;
+
+class G1CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+  friend class G1CodeRootSetTest;
+
+  typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+  static G1CodeRootSetTable* volatile _purge_list;
+
+  G1CodeRootSetTable* _purge_next;
+
+  unsigned int compute_hash(nmethod* nm) {
+    uintptr_t hash = (uintptr_t)nm;
+    return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
+  }
+
+  void remove_entry(Entry* e, Entry* previous);
+  Entry* new_entry(nmethod* nm);
+
+ public:
+  G1CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+  ~G1CodeRootSetTable();
+
+  // Needs to be protected by locks
+  bool add(nmethod* nm);
+  bool remove(nmethod* nm);
+
+  // Can be called without locking
+  bool contains(nmethod* nm);
+
+  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+  void copy_to(G1CodeRootSetTable* new_table);
+  void nmethods_do(CodeBlobClosure* blk);
+
+  template<typename CB>
+  int remove_if(CB& should_remove);
+
+  static void purge_list_append(G1CodeRootSetTable* tbl);
+  static void purge();
+
+  static size_t static_mem_size() {
+    return sizeof(_purge_list);
+  }
+
+  size_t mem_size();
+};
+
+#endif /* SHARE_VM_GC_G1_G1CODEROOTSETTABLE_HPP */

View file

@@ -1332,6 +1332,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                                 workers()->active_workers(),
                                                 Threads::number_of_non_daemon_threads());
       workers()->update_active_workers(n_workers);
+      log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());

       ParRebuildRSTask rebuild_rs_task(this);
       workers()->run_task(&rebuild_rs_task);

@@ -1478,7 +1479,7 @@ void G1CollectedHeap::resize_if_necessary_after_full_collection() {
                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);

-    expand(expand_bytes);
+    expand(expand_bytes, _workers);

     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {

@@ -1598,7 +1599,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationConte
                             word_size * HeapWordSize);

-  if (expand(expand_bytes)) {
+  if (expand(expand_bytes, _workers)) {
     _hrm.verify_optional();
     _verifier->verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,

@@ -1608,7 +1609,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationConte
   return NULL;
 }

-bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
+bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);

@@ -1625,7 +1626,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");

-  uint expanded_by = _hrm.expand_by(regions_to_expand);
+  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);

   if (expand_time_ms != NULL) {
     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
   }

@@ -1926,7 +1927,7 @@ jint G1CollectedHeap::initialize() {
   _cmThread = _cm->cmThread();

   // Now expand into the initial heap size.
-  if (!expand(init_byte_size)) {
+  if (!expand(init_byte_size, _workers)) {
     vm_shutdown_during_initialization("Failed to allocate initial heap.");
     return JNI_ENOMEM;
   }

@@ -3068,6 +3069,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
                                                 workers()->active_workers(),
                                                 Threads::number_of_non_daemon_threads());
   workers()->update_active_workers(active_workers);
+  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());

   TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
   TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());

@@ -3163,7 +3165,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");

-        _cm->note_start_of_gc();
         // We call this after finalize_cset() to
         // ensure that the CSet has been finalized.
         _cm->verify_no_cset_oops();

@@ -3239,7 +3240,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
           // No need for an ergo logging here,
           // expansion_amount() does this when it returns a value > 0.
           double expand_ms;
-          if (!expand(expand_bytes, &expand_ms)) {
+          if (!expand(expand_bytes, _workers, &expand_ms)) {
             // We failed to expand the heap. Cannot do anything about it.
           }
           g1_policy()->phase_times()->record_expand_heap_time(expand_ms);

@@ -3249,7 +3250,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         // We redo the verification but now wrt to the new CSet which
         // has just got initialized after the previous CSet was freed.
         _cm->verify_no_cset_oops();
-        _cm->note_end_of_gc();

         // This timing is only used by the ergonomics to handle our pause target.
         // It is unclear why this should not include the full pause.  We will

@@ -4513,6 +4513,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
 #if defined(COMPILER2) || INCLUDE_JVMCI
   DerivedPointerTable::update_pointers();
 #endif
+  g1_policy()->print_age_table();
 }

 void G1CollectedHeap::record_obj_copy_mem_stats() {

View file

@@ -557,7 +557,7 @@ public:
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
-  bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
+  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);

   // Returns the PLAB statistics for a given destination.
   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);

@@ -1273,12 +1273,6 @@ public:
     return true;
   }

-  // The reference pending list lock is acquired from from the
-  // ConcurrentMarkThread.
-  virtual bool needs_reference_pending_list_locker_thread() const {
-    return true;
-  }
-
   inline bool is_in_young(const oop obj);

   virtual bool is_scavengable(const void* addr);

View file

@@ -32,6 +32,7 @@
 #include "gc/g1/heapRegionSet.hpp"
 #include "logging/logStream.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/quickSort.hpp"

 G1CollectorState* G1CollectionSet::collector_state() {
   return _g1->collector_state();

@@ -396,6 +397,16 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
   return time_remaining_ms;
 }

+static int compare_region_idx(const uint a, const uint b) {
+  if (a > b) {
+    return 1;
+  } else if (a == b) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
 void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
   double non_young_start_time_sec = os::elapsedTime();
   double predicted_old_time_ms = 0.0;

@@ -493,6 +504,8 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
+
+  QuickSort::sort<uint>(_collection_set_regions, (int)_collection_set_cur_length, compare_region_idx, true);
 }

 #ifdef ASSERT
View file
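Editor's note: compare_region_idx above is a three-way comparator over unsigned region indices; explicit comparisons are used because the usual a - b shortcut wraps around for unsigned types. A standalone check of both points:

    // Sketch (illustrative, not HotSpot code): three-way comparator for
    // unsigned indices, plus a sort using it as a strict-weak-order predicate.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    static int compare_region_idx(unsigned a, unsigned b) {
      if (a > b)  return 1;
      if (a == b) return 0;
      return -1;
    }

    int main() {
      // Subtraction would wrap to a huge positive value here; the explicit
      // comparison correctly reports "less than".
      assert(compare_region_idx(0u, 4000000000u) == -1);

      std::vector<unsigned> regions = {7, 2, 9, 2};
      std::sort(regions.begin(), regions.end(),
                [](unsigned a, unsigned b) { return compare_region_idx(a, b) < 0; });
      assert(regions.front() == 2 && regions.back() == 9);
      return 0;
    }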

@@ -133,129 +133,184 @@ void G1CMBitMap::clear_range(MemRegion mr) {
 }

 G1CMMarkStack::G1CMMarkStack() :
-  _reserved_space(),
+  _max_chunk_capacity(0),
   _base(NULL),
-  _capacity(0),
-  _saved_index((size_t)AllBits),
+  _chunk_capacity(0),
+  _out_of_memory(false),
   _should_expand(false) {
   set_empty();
 }

 bool G1CMMarkStack::resize(size_t new_capacity) {
   assert(is_empty(), "Only resize when stack is empty.");
-  assert(new_capacity <= MarkStackSizeMax,
-         "Trying to resize stack to " SIZE_FORMAT " elements when the maximum is " SIZE_FORMAT, new_capacity, MarkStackSizeMax);
+  assert(new_capacity <= _max_chunk_capacity,
+         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

-  size_t reservation_size = ReservedSpace::allocation_align_size_up(new_capacity * sizeof(oop));
-  ReservedSpace rs(reservation_size);
-  if (!rs.is_reserved()) {
-    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " elements and size " SIZE_FORMAT "B.", new_capacity, reservation_size);
+  OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
+  if (new_base == NULL) {
+    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
     return false;
   }
-
-  VirtualSpace vs;
-
-  if (!vs.initialize(rs, rs.size())) {
-    rs.release();
-    log_warning(gc)("Failed to commit memory for new overflow mark stack of size " SIZE_FORMAT "B.", rs.size());
-    return false;
-  }
-
-  assert(vs.committed_size() == rs.size(), "Failed to commit all of the mark stack.");
-
   // Release old mapping.
-  _reserved_space.release();
-
-  // Save new mapping for future unmapping.
-  _reserved_space = rs;
-
-  MemTracker::record_virtual_memory_type((address)_reserved_space.base(), mtGC);
+  if (_base != NULL) {
+    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
+  }

-  _base = (oop*) vs.low();
-  _capacity = new_capacity;
+  _base = new_base;
+  _chunk_capacity = new_capacity;
   set_empty();
   _should_expand = false;
   return true;
 }

-bool G1CMMarkStack::allocate(size_t capacity) {
-  return resize(capacity);
+size_t G1CMMarkStack::capacity_alignment() {
+  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
+}
+
+bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
+  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
+
+  size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);
+
+  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
+  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
+
+  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
+            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
+            _max_chunk_capacity,
+            initial_chunk_capacity);
+
+  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
+                initial_chunk_capacity, _max_chunk_capacity);
+
+  return resize(initial_chunk_capacity);
 }

 void G1CMMarkStack::expand() {
   // Clear expansion flag
   _should_expand = false;
-  if (_capacity == MarkStackSizeMax) {
-    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " elements.", _capacity);
+
+  if (_chunk_capacity == _max_chunk_capacity) {
+    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
     return;
   }
-  size_t old_capacity = _capacity;
+  size_t old_capacity = _chunk_capacity;
   // Double capacity if possible
-  size_t new_capacity = MIN2(old_capacity * 2, MarkStackSizeMax);
+  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

   if (resize(new_capacity)) {
-    log_debug(gc)("Expanded marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
+    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                   old_capacity, new_capacity);
   } else {
-    log_warning(gc)("Failed to expand marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
+    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                     old_capacity, new_capacity);
   }
 }

 G1CMMarkStack::~G1CMMarkStack() {
   if (_base != NULL) {
-    _base = NULL;
-    _reserved_space.release();
+    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
   }
 }

-void G1CMMarkStack::par_push_arr(oop* buffer, size_t n) {
-  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-  size_t start = _index;
-  size_t next_index = start + n;
-  if (next_index > _capacity) {
-    _overflow = true;
-    return;
-  }
-  // Otherwise.
-  _index = next_index;
-  for (size_t i = 0; i < n; i++) {
-    size_t ind = start + i;
-    assert(ind < _capacity, "By overflow test above.");
-    _base[ind] = buffer[i];
-  }
+void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
+  elem->next = *list;
+  *list = elem;
 }

-bool G1CMMarkStack::par_pop_arr(oop* buffer, size_t max, size_t* n) {
-  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-  size_t index = _index;
-  if (index == 0) {
-    *n = 0;
-    return false;
-  } else {
-    size_t k = MIN2(max, index);
-    size_t new_ind = index - k;
-    for (size_t j = 0; j < k; j++) {
-      buffer[j] = _base[new_ind + j];
-    }
-    _index = new_ind;
-    *n = k;
-    return true;
-  }
+void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_chunk_list, elem);
+  _chunks_in_chunk_list++;
+}
+
+void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  add_chunk_to_list(&_free_list, elem);
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
+  OopChunk* result = *list;
+  if (result != NULL) {
+    *list = (*list)->next;
+  }
+  return result;
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
+  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
+  OopChunk* result = remove_chunk_from_list(&_chunk_list);
+  if (result != NULL) {
+    _chunks_in_chunk_list--;
+  }
+  return result;
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
+  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
+  return remove_chunk_from_list(&_free_list);
+}
+
+G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
+  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
+  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
+  // wraparound of _hwm.
+  if (_hwm >= _chunk_capacity) {
+    return NULL;
+  }
+
+  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
+  if (cur_idx >= _chunk_capacity) {
+    return NULL;
+  }
+
+  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
+  result->next = NULL;
+  return result;
+}
+
+bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
+  // Get a new chunk.
+  OopChunk* new_chunk = remove_chunk_from_free_list();
+
+  if (new_chunk == NULL) {
+    // Did not get a chunk from the free list. Allocate from backing memory.
+    new_chunk = allocate_new_chunk();
+  }
+
+  if (new_chunk == NULL) {
+    _out_of_memory = true;
+    return false;
+  }
+
+  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
+
+  add_chunk_to_chunk_list(new_chunk);
+
+  return true;
 }

-void G1CMMarkStack::note_start_of_gc() {
-  assert(_saved_index == (size_t)AllBits, "note_start_of_gc()/end_of_gc() calls bracketed incorrectly");
-  _saved_index = _index;
+bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
+  OopChunk* cur = remove_chunk_from_chunk_list();
+
+  if (cur == NULL) {
+    return false;
+  }
+
+  Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
+
+  add_chunk_to_free_list(cur);
+  return true;
 }

-void G1CMMarkStack::note_end_of_gc() {
-  guarantee(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);
-
-  _saved_index = (size_t)AllBits;
+void G1CMMarkStack::set_empty() {
+  _chunks_in_chunk_list = 0;
+  _hwm = 0;
+  clear_out_of_memory();
+  _chunk_list = NULL;
+  _free_list = NULL;
 }

 G1CMRootRegions::G1CMRootRegions() :
@@ -483,9 +538,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
     }
   }

-  if (!_global_mark_stack.allocate(MarkStackSize)) {
+  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
-    return;
   }

   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
@@ -1035,6 +1089,8 @@ void G1ConcurrentMark::mark_from_roots() {
   // worker threads may currently exist and more may not be
   // available.
   active_workers = _parallel_workers->update_active_workers(active_workers);
+  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
+
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);

@@ -1693,10 +1749,10 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
     // oop closures will set the has_overflown flag if we overflow the
     // global marking stack.

-    assert(_global_mark_stack.overflow() || _global_mark_stack.is_empty(),
-           "mark stack should be empty (unless it overflowed)");
+    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
+           "Mark stack should be empty (unless it is out of memory)");

-    if (_global_mark_stack.overflow()) {
+    if (_global_mark_stack.is_out_of_memory()) {
       // This should have been done already when we tried to push an
       // entry on to the global mark stack. But let's do it again.
       set_has_overflown();
@@ -1902,7 +1958,8 @@ G1ConcurrentMark::claim_region(uint worker_id) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");

     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
+    // Make sure that the reads below do not float before loading curr_region.
+    OrderAccess::loadload();
     // Above heap_region_containing may return NULL as we always scan claim
     // until the end of the heap. In this case, just jump to the next region.
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
@@ -2340,49 +2397,54 @@ void G1CMTask::decrease_limits() {
 }

 void G1CMTask::move_entries_to_global_stack() {
-  // local array where we'll store the entries that will be popped
-  // from the local queue
-  oop buffer[global_stack_transfer_size];
-  int n = 0;
+  // Local array where we'll store the entries that will be popped
+  // from the local queue.
+  oop buffer[G1CMMarkStack::OopsPerChunk];
+
+  size_t n = 0;
   oop obj;
-  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
+  while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
     buffer[n] = obj;
     ++n;
   }
+  if (n < G1CMMarkStack::OopsPerChunk) {
+    buffer[n] = NULL;
+  }

   if (n > 0) {
-    // we popped at least one entry from the local queue
-    if (!_cm->mark_stack_push(buffer, n)) {
+    if (!_cm->mark_stack_push(buffer)) {
       set_has_aborted();
     }
   }

-  // this operation was quite expensive, so decrease the limits
+  // This operation was quite expensive, so decrease the limits.
   decrease_limits();
 }

-void G1CMTask::get_entries_from_global_stack() {
-  // local array where we'll store the entries that will be popped
+bool G1CMTask::get_entries_from_global_stack() {
+  // Local array where we'll store the entries that will be popped
   // from the global stack.
-  oop buffer[global_stack_transfer_size];
-  size_t n;
-  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
-  assert(n <= global_stack_transfer_size,
-         "we should not pop more than the given limit");
-  if (n > 0) {
-    // yes, we did actually pop at least one entry
-    for (size_t i = 0; i < n; ++i) {
-      bool success = _task_queue->push(buffer[i]);
-      // We only call this when the local queue is empty or under a
-      // given target limit. So, we do not expect this push to fail.
-      assert(success, "invariant");
-    }
+  oop buffer[G1CMMarkStack::OopsPerChunk];
+
+  if (!_cm->mark_stack_pop(buffer)) {
+    return false;
   }

-  // this operation was quite expensive, so decrease the limits
+  // We did actually pop at least one entry.
+  for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
+    oop elem = buffer[i];
+    if (elem == NULL) {
+      break;
+    }
+    bool success = _task_queue->push(elem);
+    // We only call this when the local queue is empty or under a
+    // given target limit. So, we do not expect this push to fail.
+    assert(success, "invariant");
+  }
+
+  // This operation was quite expensive, so decrease the limits
   decrease_limits();
+  return true;
 }

 void G1CMTask::drain_local_queue(bool partially) {
@@ -2426,20 +2488,21 @@ void G1CMTask::drain_global_stack(bool partially) {
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
-  // of things to do) or totally (at the very end). Notice that,
-  // because we move entries from the global stack in chunks or
-  // because another task might be doing the same, we might in fact
-  // drop below the target. But, this is not a problem.
-  size_t target_size;
+  // of things to do) or totally (at the very end).
+  // Notice that when draining the global mark stack partially, due to the racyness
+  // of the mark stack size update we might in fact drop below the target. But,
+  // this is not a problem.
+  // In case of total draining, we simply process until the global mark stack is
+  // totally empty, disregarding the size counter.
   if (partially) {
-    target_size = _cm->partial_mark_stack_size_target();
-  } else {
-    target_size = 0;
-  }
-
-  if (_cm->mark_stack_size() > target_size) {
+    size_t const target_size = _cm->partial_mark_stack_size_target();
     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
-      get_entries_from_global_stack();
+      if (get_entries_from_global_stack()) {
+        drain_local_queue(partially);
+      }
+    }
+  } else {
+    while (!has_aborted() && get_entries_from_global_stack()) {
       drain_local_queue(partially);
     }
   }
 }
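
The allocate_new_chunk() added above hands out chunks from the pre-reserved array by bumping a high-water mark with an atomic add; the preceding "dirty read" bounds the mark to capacity plus the number of racing threads, so it cannot wrap. A minimal standalone sketch of that claiming scheme, written with std::atomic rather than HotSpot's Atomic::add (Chunk, base, capacity and hwm are illustrative stand-ins for the real fields):

// Sketch of the high-water-mark chunk allocation in allocate_new_chunk().
#include <atomic>
#include <cstddef>

struct Chunk {
  Chunk* next;
  // payload elided for the sketch
};

Chunk* allocate_new_chunk(Chunk* base, std::size_t capacity,
                          std::atomic<std::size_t>& hwm) {
  // Dirty read: hwm only ever grows, so this pre-check limits it to
  // capacity + #racing threads and prevents wraparound.
  if (hwm.load(std::memory_order_relaxed) >= capacity) {
    return nullptr;
  }
  std::size_t idx = hwm.fetch_add(1, std::memory_order_relaxed);
  if (idx >= capacity) {
    return nullptr; // lost the race right at the limit
  }
  Chunk* c = &base[idx];
  c->next = nullptr;
  return c;
}

int main() {
  Chunk chunks[8];
  std::atomic<std::size_t> hwm(0);
  while (allocate_new_chunk(chunks, 8, hwm) != nullptr) {
    // claim every chunk; further calls return nullptr
  }
  return 0;
}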

View file

@@ -149,42 +149,98 @@ class G1CMBitMap : public G1CMBitMapRO {
 //
 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 // Resizing may only happen during a STW pause when the stack is empty.
+//
+// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
+// stack memory is split into evenly sized chunks of oops. Users can only
+// add or remove entries on that basis.
+// Chunks are filled in increasing address order. Not completely filled chunks
+// have a NULL element as a terminating element.
+//
+// Every chunk has a header containing a single pointer element used for memory
+// management. This wastes some space, but is negligible (< .1% with current sizing).
+//
+// Memory management is done using a mix of tracking a high water-mark indicating
+// that all chunks at a lower address are valid chunks, and a singly linked free
+// list connecting all empty chunks.
 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
-  ReservedSpace _reserved_space; // Space currently reserved for the mark stack.
+public:
+  // Number of oops that can fit in a single chunk.
+  static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
+private:
+  struct OopChunk {
+    OopChunk* next;
+    oop data[OopsPerChunk];
+  };

-  oop* _base;                    // Bottom address of allocated memory area.
-  size_t _capacity;              // Maximum number of elements.
-  size_t _index;                 // One more than last occupied index.
+  size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.

-  size_t _saved_index;           // Value of _index saved at start of GC to detect mark stack modifications during that time.
+  OopChunk* _base;               // Bottom address of allocated memory area.
+  size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
+
+  char _pad0[DEFAULT_CACHE_LINE_SIZE];
+  OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
+  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
+  OopChunk* volatile _chunk_list; // List of chunks currently containing data.
+  volatile size_t _chunks_in_chunk_list;
+  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
+
+  volatile size_t _hwm;          // High water mark within the reserved space.
+  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
+
+  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
+  // NULL if out of memory.
+  OopChunk* allocate_new_chunk();
+
+  volatile bool _out_of_memory;
+
+  // Atomically add the given chunk to the list.
+  void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
+  // Atomically remove and return a chunk from the given list. Returns NULL if the
+  // list is empty.
+  OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
+
+  void add_chunk_to_chunk_list(OopChunk* elem);
+  void add_chunk_to_free_list(OopChunk* elem);
+
+  OopChunk* remove_chunk_from_chunk_list();
+  OopChunk* remove_chunk_from_free_list();

-  bool _overflow;
   bool _should_expand;

   // Resizes the mark stack to the given new capacity. Releases any previous
   // memory if successful.
   bool resize(size_t new_capacity);

-  bool stack_modified() const { return _index != _saved_index; }
  public:
   G1CMMarkStack();
   ~G1CMMarkStack();

-  bool allocate(size_t capacity);
+  // Alignment and minimum capacity of this mark stack in number of oops.
+  static size_t capacity_alignment();
+
+  // Allocate and initialize the mark stack with the given number of oops.
+  bool initialize(size_t initial_capacity, size_t max_capacity);

-  // Pushes the first "n" elements of the given buffer on the stack.
-  void par_push_arr(oop* buffer, size_t n);
+  // Pushes the given buffer containing at most OopsPerChunk elements on the mark
+  // stack. If less than OopsPerChunk elements are to be pushed, the array must
+  // be terminated with a NULL.
+  // Returns whether the buffer contents were successfully pushed to the global mark
+  // stack.
+  bool par_push_chunk(oop* buffer);

-  // Moves up to max elements from the stack into the given buffer. Returns
-  // the number of elements pushed, and false if the array has been empty.
-  // Returns true if the buffer contains at least one element.
-  bool par_pop_arr(oop* buffer, size_t max, size_t* n);
+  // Pops a chunk from this mark stack, copying them into the given buffer. This
+  // chunk may contain up to OopsPerChunk elements. If there are less, the last
+  // element in the array is a NULL pointer.
+  bool par_pop_chunk(oop* buffer);

-  bool is_empty() const { return _index == 0; }
-  size_t capacity() const  { return _capacity; }
+  // Return whether the chunk list is empty. Racy due to unsynchronized access to
+  // _chunk_list.
+  bool is_empty() const { return _chunk_list == NULL; }

-  bool overflow() const { return _overflow; }
-  void clear_overflow() { _overflow = false; }
+  size_t capacity() const  { return _chunk_capacity; }
+
+  bool is_out_of_memory() const { return _out_of_memory; }
+  void clear_out_of_memory() { _out_of_memory = false; }

   bool should_expand() const { return _should_expand; }
   void set_should_expand(bool value) { _should_expand = value; }
@@ -192,20 +248,15 @@ class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
   // Expand the stack, typically in response to an overflow condition
   void expand();

-  size_t size() const { return _index; }
+  // Return the approximate number of oops on this mark stack. Racy due to
+  // unsynchronized access to _chunks_in_chunk_list.
+  size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }

-  void set_empty() { _index = 0; clear_overflow(); }
+  void set_empty();

-  // Record the current index.
-  void note_start_of_gc();
-
-  // Make sure that we have not added any entries to the stack during GC.
-  void note_end_of_gc();
-
-  // Apply fn to each oop in the mark stack, up to the bound recorded
-  // via one of the above "note" functions. The mark stack must not
+  // Apply Fn to every oop on the mark stack. The mark stack must not
   // be modified while iterating.
-  template<typename Fn> void iterate(Fn fn);
+  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 };

 // Root Regions are regions that are not empty at the beginning of a
@@ -278,7 +329,6 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
   friend class G1CMDrainMarkingStackClosure;
   friend class G1CMBitMapClosure;
   friend class G1CMConcurrentMarkingTask;
-  friend class G1CMMarkStack;
   friend class G1CMRemarkTask;
   friend class G1CMTask;

@@ -479,22 +529,20 @@ protected:
 public:
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
-  // between task-local queues and the global mark stack, and use
-  // locking for concurrency safety.
-  bool mark_stack_push(oop* arr, size_t n) {
-    _global_mark_stack.par_push_arr(arr, n);
-    if (_global_mark_stack.overflow()) {
+  // between task-local queues and the global mark stack.
+  bool mark_stack_push(oop* arr) {
+    if (!_global_mark_stack.par_push_chunk(arr)) {
       set_has_overflown();
       return false;
     }
     return true;
   }
-  void mark_stack_pop(oop* arr, size_t max, size_t* n) {
-    _global_mark_stack.par_pop_arr(arr, max, n);
+  bool mark_stack_pop(oop* arr) {
+    return _global_mark_stack.par_pop_chunk(arr);
   }
   size_t mark_stack_size()                { return _global_mark_stack.size(); }
   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
-  bool mark_stack_overflow()              { return _global_mark_stack.overflow(); }
+  bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }

   G1CMRootRegions* root_regions() { return &_root_regions; }
@@ -599,16 +647,6 @@ public:
   // read-only, so use this carefully!
   void clearRangePrevBitmap(MemRegion mr);

-  // Notify data structures that a GC has started.
-  void note_start_of_gc() {
-    _global_mark_stack.note_start_of_gc();
-  }
-
-  // Notify data structures that a GC is finished.
-  void note_end_of_gc() {
-    _global_mark_stack.note_end_of_gc();
-  }
-
   // Verify that there are no CSet oops on the stacks (taskqueues /
   // global mark stack) and fingers (global / per-task).
   // If marking is not in progress, it's a no-op.
@@ -670,10 +708,7 @@ private:
   // references reaches this limit
   refs_reached_period           = 384,
   // Initial value for the hash seed, used in the work stealing code
-  init_hash_seed                = 17,
-  // How many entries will be transferred between global stack and
-  // local queues at once.
-  global_stack_transfer_size    = 1024
+  init_hash_seed                = 17
 };

 uint                        _worker_id;
@@ -858,9 +893,10 @@ public:
   // It pushes an object on the local queue.
   inline void push(oop obj);

-  // These two move entries to/from the global stack.
+  // Move entries to the global stack.
   void move_entries_to_global_stack();
-  void get_entries_from_global_stack();
+  // Move entries from the global stack, return true if we were successful to do so.
+  bool get_entries_from_global_stack();

   // It pops and scans objects from the local queue. If partially is
   // true, then it stops when the queue size is of a given limit. If
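
Because whole chunks are now the unit of transfer, a task that drains fewer than OopsPerChunk entries from its local queue must NULL-terminate the buffer, and the consumer stops at the first NULL (see par_push_chunk()/par_pop_chunk() above). A small standalone sketch of that convention; names are illustrative, and the chunk size is kept deliberately tiny (the real value is 1023):

// Sketch of the NULL-terminator convention for partially filled chunks.
#include <cstddef>
#include <cstdio>

static const std::size_t kOopsPerChunk = 4;

// Fill a chunk buffer from 'src'; NULL-terminate if it is not full.
static std::size_t fill_chunk(void* buffer[], void* src[], std::size_t n) {
  std::size_t i = 0;
  for (; i < kOopsPerChunk && i < n; ++i) {
    buffer[i] = src[i];
  }
  if (i < kOopsPerChunk) {
    buffer[i] = NULL; // terminator marks a partially filled chunk
  }
  return i;
}

// Consume a chunk buffer, stopping at the terminator or at full capacity.
static void drain_chunk(void* buffer[]) {
  for (std::size_t i = 0; i < kOopsPerChunk; ++i) {
    if (buffer[i] == NULL) {
      break;
    }
    std::printf("entry %zu: %p\n", i, buffer[i]);
  }
}

int main() {
  int a = 0, b = 0;
  void* src[] = { &a, &b };
  void* chunk[kOopsPerChunk];
  fill_chunk(chunk, src, 2); // only 2 of 4 slots used, so slot 2 is NULL
  drain_chunk(chunk);        // prints exactly two entries
  return 0;
}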

View file

@@ -89,14 +89,28 @@ inline bool G1CMBitMap::parMark(HeapWord* addr) {
 #undef check_mark

+#ifndef PRODUCT
 template<typename Fn>
-inline void G1CMMarkStack::iterate(Fn fn) {
+inline void G1CMMarkStack::iterate(Fn fn) const {
   assert_at_safepoint(true);
-  assert(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);
-  for (size_t i = 0; i < _index; ++i) {
-    fn(_base[i]);
+
+  size_t num_chunks = 0;
+
+  OopChunk* cur = _chunk_list;
+  while (cur != NULL) {
+    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);
+
+    for (size_t i = 0; i < OopsPerChunk; ++i) {
+      if (cur->data[i] == NULL) {
+        break;
+      }
+      fn(cur->data[i]);
+    }
+    cur = cur->next;
+    num_chunks++;
   }
 }
+#endif

 // It scans an object and visits its children.
 inline void G1CMTask::scan_object(oop obj) { process_grey_object<true>(obj); }

View file

@@ -885,6 +885,15 @@ bool G1DefaultPolicy::adaptive_young_list_length() const {
   return _young_gen_sizer.adaptive_young_list_length();
 }

+size_t G1DefaultPolicy::desired_survivor_size() const {
+  size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
+  return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
+}
+
+void G1DefaultPolicy::print_age_table() {
+  _survivors_age_table.print_age_table(_tenuring_threshold);
+}
+
 void G1DefaultPolicy::update_max_gc_locker_expansion() {
   uint expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
@@ -908,8 +917,11 @@ void G1DefaultPolicy::update_survivors_policy() {
   // smaller than 1.0) we'll get 1.
   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

-  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
-      HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
+  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
+  if (UsePerfData) {
+    _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
+    _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
+  }
 }

 bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
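
desired_survivor_size() above is simply the survivor capacity in words scaled by TargetSurvivorRatio percent. A worked example under assumed values; the region size, survivor region count, and ratio below are illustrative (50 is HotSpot's default for TargetSurvivorRatio):

// Worked example of the desired_survivor_size() arithmetic.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t grain_words           = 1024 * 1024; // words per region (assumed)
  const unsigned    max_survivor_regions  = 10;
  const double      target_survivor_ratio = 50.0;

  std::size_t survivor_capacity = grain_words * max_survivor_regions;
  std::size_t desired =
      (std::size_t)(((double)survivor_capacity * target_survivor_ratio) / 100.0);

  // 10485760 words of capacity yield a desired survivor size of 5242880 words.
  std::printf("capacity=%zu words, desired=%zu words\n", survivor_capacity, desired);
  return 0;
}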

View file

@@ -360,6 +360,8 @@
   AgeTable _survivors_age_table;

+protected:
+  size_t desired_survivor_size() const;
 public:
   uint tenuring_threshold() const { return _tenuring_threshold; }
@@ -379,6 +381,8 @@
     _survivors_age_table.merge(age_table);
   }

+  void print_age_table();
+
   void update_max_gc_locker_expansion();

   void update_survivors_policy();

View file

@@ -132,9 +132,16 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
   {
     G1RootProcessor root_processor(g1h, 1);
-    root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
-                                        &GenMarkSweep::follow_cld_closure,
-                                        &follow_code_closure);
+    if (ClassUnloading) {
+      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
+                                          &GenMarkSweep::follow_cld_closure,
+                                          &follow_code_closure);
+    } else {
+      root_processor.process_all_roots_no_string_table(
+                                          &GenMarkSweep::follow_root_closure,
+                                          &GenMarkSweep::follow_cld_closure,
+                                          &follow_code_closure);
+    }
   }

   {
@@ -157,7 +164,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   // This is the point where the entire marking should have completed.
   assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

-  {
+  if (ClassUnloading) {
     GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

     // Unload classes and purge the SystemDictionary.

View file

@@ -34,7 +34,6 @@ class G1RemSet;
 class G1ConcurrentMark;
 class DirtyCardToOopClosure;
 class G1CMBitMap;
-class G1CMMarkStack;
 class G1ParScanThreadState;
 class G1CMTask;
 class ReferenceProcessor;

View file

@@ -24,8 +24,10 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -177,7 +179,7 @@ void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_pa
   guarantee(start_page < end_page,
             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);

-  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page), _page_size);
 }

 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
@@ -198,9 +200,6 @@ bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
   }
   _committed.set_range(start_page, end_page);

-  if (AlwaysPreTouch) {
-    pretouch_internal(start_page, end_page);
-  }
   return zero_filled;
 }

@@ -227,6 +226,53 @@ void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages)
   _committed.clear_range(start_page, end_page);
 }

+class G1PretouchTask : public AbstractGangTask {
+private:
+  char* volatile _cur_addr;
+  char* const _start_addr;
+  char* const _end_addr;
+  size_t const _page_size;
+public:
+  G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
+    AbstractGangTask("G1 PreTouch",
+                     Universe::is_fully_initialized() ? GCId::current_raw() :
+                                                        // During VM initialization there is
+                                                        // no GC cycle that this task can be
+                                                        // associated with.
+                                                        GCId::undefined()),
+    _cur_addr(start_address),
+    _start_addr(start_address),
+    _end_addr(end_address),
+    _page_size(page_size) {
+  }
+
+  virtual void work(uint worker_id) {
+    size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
+    while (true) {
+      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
+        break;
+      }
+      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
+      os::pretouch_memory(touch_addr, end_addr, _page_size);
+    }
+  }
+
+  static size_t chunk_size() { return PreTouchParallelChunkSize; }
+};
+
+void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {
+  guarantee(pretouch_gang != NULL, "No pretouch gang specified.");
+
+  size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));
+
+  uint num_workers = MIN2((uint)num_chunks, pretouch_gang->active_workers());
+  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);
+  log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
+                      cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
+  pretouch_gang->run_task(&cl, num_workers);
+}
+
 bool G1PageBasedVirtualSpace::contains(const void* p) const {
   return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
 }
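
G1PretouchTask::work() above is a classic atomic work-claiming loop: each worker bumps a shared cursor by one chunk, touches the pages in the chunk it claimed, and stops once its claim lands past the end. A standalone sketch of the same pattern using std::atomic and std::thread; pretouch_parallel and its parameters are illustrative, and a volatile write per page stands in for os::pretouch_memory():

// Sketch of the atomic chunk-claiming scheme used for parallel pre-touch.
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

void pretouch_parallel(char* start, char* end, std::size_t page_size,
                       std::size_t chunk_size, unsigned num_workers) {
  std::atomic<char*> cur(start);
  auto work = [&]() {
    for (;;) {
      char* touch = cur.fetch_add(chunk_size); // claim the next chunk
      if (touch >= end) {
        break; // claim landed past the end: no work left
      }
      char* limit = (chunk_size < (std::size_t)(end - touch)) ? touch + chunk_size : end;
      for (char* p = touch; p < limit; p += page_size) {
        *(volatile char*)p = *p; // fault the page in
      }
    }
  };
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < num_workers; i++) {
    gang.emplace_back(work);
  }
  for (std::thread& t : gang) {
    t.join();
  }
}

int main() {
  std::vector<char> buf(1 << 20); // 1 MiB to pre-touch
  pretouch_parallel(buf.data(), buf.data() + buf.size(), 4096, 64 * 1024, 4);
  return 0;
}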

Some files were not shown because too many files have changed in this diff.