This commit is contained in:
Jesper Wilhelmsson 2020-01-08 16:03:32 +01:00
commit 257a1bb854
54 changed files with 1071 additions and 308 deletions

View file

@@ -604,5 +604,7 @@ c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21
63e17cf29bed191ea21020b4648c9cdf893f80f5 jdk-15+1
2069b4bfd23b56b6fc659fba8b75aaaa23debbe0 jdk-14+28
f33197adda9ad82fdef46ac0f7dc0126204f35b2 jdk-15+2
563fa900fa17c290ae516c7a3a69e8c069dde304 jdk-14+29
d05fcdf25717d85e80a3a39a6b719458b22be5fe jdk-15+3
d54ce919da90dab361995bb4d87be9851f00537a jdk-14+30
bb0a7975b31ded63d594ee8dbfc4d4ead587f79b jdk-15+4

View file

@@ -154,6 +154,7 @@ public class GenerateJfrFiles {
boolean startTime;
boolean periodic;
boolean cutoff;
String commitState;
}
static class FieldElement {
@ -219,14 +220,15 @@ public class GenerateJfrFiles {
currentType.name = attributes.getValue("name");
break;
case "Event":
EventElement eventtType = new EventElement();
eventtType.name = attributes.getValue("name");
eventtType.thread = getBoolean(attributes, "thread", false);
eventtType.stackTrace = getBoolean(attributes, "stackTrace", false);
eventtType.startTime = getBoolean(attributes, "startTime", true);
eventtType.periodic = attributes.getValue("period") != null;
eventtType.cutoff = getBoolean(attributes, "cutoff", false);
currentType = eventtType;
EventElement eventType = new EventElement();
eventType.name = attributes.getValue("name");
eventType.thread = getBoolean(attributes, "thread", false);
eventType.stackTrace = getBoolean(attributes, "stackTrace", false);
eventType.startTime = getBoolean(attributes, "startTime", true);
eventType.periodic = attributes.getValue("period") != null;
eventType.cutoff = getBoolean(attributes, "cutoff", false);
eventType.commitState = attributes.getValue("commitState");
currentType = eventType;
break;
case "Field":
currentField = new FieldElement(metadata);
@ -459,6 +461,7 @@ public class GenerateJfrFiles {
out.write("#include \"utilities/ticks.hpp\"");
out.write("#if INCLUDE_JFR");
out.write("#include \"jfr/recorder/service/jfrEvent.hpp\"");
out.write("#include \"jfr/support/jfrEpochSynchronization.hpp\"");
out.write("/*");
out.write(" * Each event class has an assert member function verify() which is invoked");
out.write(" * just before the engine writes the event and its fields to the data stream.");
@ -523,7 +526,7 @@ public class GenerateJfrFiles {
}
out.write("");
if (!empty) {
printWriteData(out, t.fields);
printWriteData(out, t.fields, null);
}
out.write("};");
out.write("");
@ -566,7 +569,7 @@ public class GenerateJfrFiles {
}
out.write("");
if (!empty) {
printWriteData(out, event.fields);
printWriteData(out, event.fields, event.commitState);
out.write("");
}
out.write(" using JfrEvent<Event" + event.name + ">::commit; // else commit() is hidden by overloaded versions in this class");
@ -578,9 +581,13 @@ public class GenerateJfrFiles {
out.write("};");
}
private static void printWriteData(Printer out, List<FieldElement> fields) {
private static void printWriteData(Printer out, List<FieldElement> fields, String commitState) {
out.write(" template <typename Writer>");
out.write(" void writeData(Writer& w) {");
if (("_thread_in_native").equals(commitState)) {
out.write(" // explicit epoch synchronization check");
out.write(" JfrEpochSynchronization sync;");
}
for (FieldElement field : fields) {
if (field.struct) {
out.write(" _" + field.name + ".writeData(w);");

View file

@ -2193,7 +2193,7 @@ const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}

View file

@ -1080,7 +1080,7 @@ const bool Matcher::convi2l_type_required = true;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}

View file

@ -2438,7 +2438,7 @@ const bool Matcher::need_masked_shift_count = true;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}

View file

@ -1661,7 +1661,7 @@ const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}

View file

@ -1818,7 +1818,7 @@ const bool Matcher::need_masked_shift_count = false;
// No support for generic vector operands.
const bool Matcher::supports_generic_vector_operands = false;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
ShouldNotReachHere(); // generic vector operands not supported
return NULL;
}

View file

@ -1438,9 +1438,14 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
// x86 supports generic vector operands: vec and legVec.
const bool Matcher::supports_generic_vector_operands = true;
MachOper* Matcher::specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg) {
MachOper* Matcher::specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
assert(Matcher::is_generic_vector(generic_opnd), "not generic");
bool legacy = (generic_opnd->opcode() == LEGVEC);
if (!VM_Version::supports_avx512vlbwdq() && // KNL
is_temp && !legacy && (ideal_reg == Op_VecZ)) {
// Conservatively specialize 512bit vec TEMP operands to legVecZ (zmm0-15) on KNL.
return new legVecZOper();
}
if (legacy) {
switch (ideal_reg) {
case Op_VecS: return new legVecSOper();

View file

@ -104,7 +104,7 @@ void CompilationPolicy::compile_if_required(const methodHandle& selected_method,
return;
}
CompileBroker::compile_method(selected_method, InvocationEntryBci,
CompilationPolicy::policy()->initial_compile_level(),
CompilationPolicy::policy()->initial_compile_level(selected_method),
methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK);
}
}

View file

@ -59,7 +59,7 @@ public:
static CompileTask* select_task_helper(CompileQueue* compile_queue);
// Return initial compile level that is used with Xcomp
virtual CompLevel initial_compile_level() = 0;
virtual CompLevel initial_compile_level(const methodHandle& method) = 0;
virtual int compiler_count(CompLevel comp_level) = 0;
// main notification entry, return a pointer to an nmethod if the OSR is required,
// returns NULL otherwise.
@ -97,7 +97,7 @@ class SimpleCompPolicy : public CompilationPolicy {
void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread);
public:
SimpleCompPolicy() : _compiler_count(0) { }
virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
virtual CompLevel initial_compile_level(const methodHandle& m) { return CompLevel_highest_tier; }
virtual int compiler_count(CompLevel comp_level);
virtual void do_safepoint_work();
virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);

View file

@ -57,9 +57,6 @@ bool CompilationModeFlag::initialize() {
jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', supported modes are: quick-only, high-only, high-only-quick-internal\n", CompilationMode);
return false;
}
if (disable_intermediate()) {
CompLevel_initial_compile = CompLevel_full_optimization;
}
}
return true;
}
@ -74,16 +71,6 @@ CompLevel CompLevel_highest_tier = CompLevel_simple; // pure C
CompLevel CompLevel_highest_tier = CompLevel_none;
#endif
#if defined(TIERED)
CompLevel CompLevel_initial_compile = CompLevel_full_profile; // tiered
#elif defined(COMPILER1) || INCLUDE_JVMCI
CompLevel CompLevel_initial_compile = CompLevel_simple; // pure C1 or JVMCI
#elif defined(COMPILER2)
CompLevel CompLevel_initial_compile = CompLevel_full_optimization; // pure C2
#else
CompLevel CompLevel_initial_compile = CompLevel_none;
#endif
#if defined(COMPILER2)
CompMode Compilation_mode = CompMode_server;
#elif defined(COMPILER1)
@ -145,7 +132,6 @@ intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
void set_client_compilation_mode() {
Compilation_mode = CompMode_client;
CompLevel_highest_tier = CompLevel_simple;
CompLevel_initial_compile = CompLevel_simple;
FLAG_SET_ERGO(TieredCompilation, false);
FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI

View file

@ -83,7 +83,6 @@ public:
#endif
extern CompLevel CompLevel_highest_tier;
extern CompLevel CompLevel_initial_compile;
enum CompMode {
CompMode_none = 0,

View file

@ -307,6 +307,78 @@ void TieredThresholdPolicy::initialize() {
set_start_time(os::javaTimeMillis());
}
#ifdef ASSERT
// Debug-only check: is 'level' permitted under the active compilation mode?
// AOT and interpreter levels are accepted unconditionally; the remaining
// branches mirror the CompilationModeFlag predicates one-to-one.
bool TieredThresholdPolicy::verify_level(CompLevel level) {
// AOT and interpreter levels are always valid.
if (level == CompLevel_aot || level == CompLevel_none) {
return true;
}
if (CompilationModeFlag::normal()) {
return true;
} else if (CompilationModeFlag::quick_only()) {
// quick-only: C1 full-opt-less tier only.
return level == CompLevel_simple;
} else if (CompilationModeFlag::high_only()) {
// high-only: C2/full optimization only.
return level == CompLevel_full_optimization;
} else if (CompilationModeFlag::high_only_quick_internal()) {
// high-only-quick-internal: C2, plus C1 for forced-simple internal methods.
return level == CompLevel_full_optimization || level == CompLevel_simple;
}
// No recognized mode — treat any other level as invalid.
return false;
}
#endif
// Clamp a requested compilation level to what the current compilation mode
// and TieredStopAtLevel allow. Returns the requested level unchanged when it
// is already within bounds.
CompLevel TieredThresholdPolicy::limit_level(CompLevel level) {
if (CompilationModeFlag::quick_only()) {
// quick-only caps everything at the C1 simple tier.
level = MIN2(level, CompLevel_simple);
}
assert(verify_level(level), "Invalid compilation level %d", level);
if (level <= TieredStopAtLevel) {
return level;
}
// Some compilation levels are not valid depending on a compilation mode:
// a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
// b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
// c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
// The invalid levels are actually sequential so a single comparison is sufficient.
// Down here we already have (level > TieredStopAtLevel), which also implies that
// (TieredStopAtLevel < Highest Possible Level), so we need to return a level that is:
// a) a max level that is strictly less than the highest for a given compilation mode
// b) less or equal to TieredStopAtLevel
if (CompilationModeFlag::normal() || CompilationModeFlag::quick_only()) {
return (CompLevel)TieredStopAtLevel;
}
if (CompilationModeFlag::high_only() || CompilationModeFlag::high_only_quick_internal()) {
// NOTE(review): MIN2 against CompLevel_none — per the comment above, the
// levels below the highest are invalid in these modes, so fall back to
// the interpreter level; confirm against CompLevel enum ordering.
return MIN2(CompLevel_none, (CompLevel)TieredStopAtLevel);
}
ShouldNotReachHere();
return CompLevel_any;
}
// Pick the unclamped initial compile level for 'method' based solely on the
// compilation mode; limit_level() is applied by the caller
// (initial_compile_level) afterwards.
CompLevel TieredThresholdPolicy::initial_compile_level_helper(const methodHandle& method) {
if (CompilationModeFlag::normal()) {
// Normal tiered: start with C1 full profiling.
return CompLevel_full_profile;
} else if (CompilationModeFlag::quick_only()) {
return CompLevel_simple;
} else if (CompilationModeFlag::high_only()) {
return CompLevel_full_optimization;
} else if (CompilationModeFlag::high_only_quick_internal()) {
// Certain methods are forced to C1 even in this mostly-C2 mode.
if (force_comp_at_level_simple(method)) {
return CompLevel_simple;
} else {
return CompLevel_full_optimization;
}
}
ShouldNotReachHere();
return CompLevel_any;
}
// Initial compile level for 'method' (used with -Xcomp): mode-dependent
// choice from the helper, clamped by limit_level().
CompLevel TieredThresholdPolicy::initial_compile_level(const methodHandle& method) {
return limit_level(initial_compile_level_helper(method));
}
void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
counter->set_carry_flag();
@ -457,12 +529,7 @@ nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHa
// Check if the method can be compiled, change level if necessary
void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
assert(level <= TieredStopAtLevel, "Invalid compilation level");
if (CompilationModeFlag::quick_only()) {
assert(level <= CompLevel_simple, "Invalid compilation level");
} else if (CompilationModeFlag::disable_intermediate()) {
assert(level != CompLevel_full_profile && level != CompLevel_limited_profile, "C1 profiling levels shouldn't be used with intermediate levels disabled");
}
assert(verify_level(level) && level <= TieredStopAtLevel, "Invalid compilation level %d", level);
if (level == CompLevel_none) {
if (mh->has_compiled_code()) {
@ -924,9 +991,11 @@ CompLevel TieredThresholdPolicy::common(Predicate p, const methodHandle& method,
}
}
}
return MIN2(next_level, CompilationModeFlag::quick_only() ? CompLevel_simple : (CompLevel)TieredStopAtLevel);
return limit_level(next_level);
}
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel TieredThresholdPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* thread) {
CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
@ -1027,7 +1096,7 @@ void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, con
if (level == CompLevel_aot) {
// Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
CompLevel enclosing_level = MIN2(CompilationModeFlag::quick_only() ? CompLevel_simple : (CompLevel)TieredStopAtLevel, CompLevel_full_profile);
CompLevel enclosing_level = limit_level(CompLevel_full_profile);
compile(mh, InvocationEntryBci, enclosing_level, thread);
}
} else {

View file

@ -170,8 +170,14 @@ class TieredThresholdPolicy : public CompilationPolicy {
inline void set_carry_if_necessary(InvocationCounter *counter);
// Set carry flags in the counters (in Method* and MDO).
inline void handle_counter_overflow(Method* method);
// Verify that a level is consistent with the compilation mode
bool verify_level(CompLevel level);
// Clamp the request level according to various constraints.
inline CompLevel limit_level(CompLevel level);
// Return desired initial compilation level for Xcomp
CompLevel initial_compile_level_helper(const methodHandle& method);
// Call and loop predicates determine whether a transition to a higher compilation
// level should be performed (pointers to predicate functions are passed to common_TF().
// level should be performed (pointers to predicate functions are passed to common().
// Predicates also take compiler load into account.
typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, const methodHandle& method);
bool call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);
@ -253,7 +259,8 @@ public:
if (is_c2_compile(comp_level)) return c2_count();
return 0;
}
virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); }
// Return initial compile level to use with Xcomp (depends on compilation mode).
virtual CompLevel initial_compile_level(const methodHandle& method);
virtual void do_safepoint_work() { }
virtual void delay_compilation(Method* method) { }
virtual void disable_compilation(Method* method) { }

View file

@ -464,7 +464,7 @@
<Field type="ulong" contentType="bytes" name="used" label="Used" />
</Event>
<Event name="Compilation" category="Java Virtual Machine, Compiler" label="Compilation" thread="true">
<Event name="Compilation" category="Java Virtual Machine, Compiler" label="Compilation" thread="true" commitState="_thread_in_native">
<Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
<Field type="CompilerType" name="compiler" label="Compiler" />
<Field type="Method" name="method" label="Method" />
@ -492,7 +492,7 @@
<Field type="string" name="descriptor" label="Method Descriptor" />
</Type>
<Event name="CompilerInlining" category="Java Virtual Machine, Compiler, Optimization" label="Method Inlining" thread="true" startTime="false">
<Event name="CompilerInlining" category="Java Virtual Machine, Compiler, Optimization" label="Method Inlining" thread="true" startTime="false" commitState="_thread_in_native">
<Field type="uint" name="compileId" label="Compilation Identifier" relation="CompileId" />
<Field type="Method" name="caller" label="Caller Method" />
<Field type="CalleeMethod" name="callee" struct="true" label="Callee Method" />

View file

@ -70,6 +70,7 @@
<xs:attribute name="stackTrace" type="xs:boolean" use="optional" />
<xs:attribute name="period" type="periodType" use="optional" />
<xs:attribute name="cutoff" type="xs:boolean" use="optional" />
<xs:attribute name="commitState" type="xs:string" use="optional" />
</xs:complexType>
</xs:element>
<xs:element maxOccurs="unbounded" name="Type">

View file

@ -44,9 +44,9 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
@ -172,7 +172,7 @@ static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t ret
}
bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
return _service_thread != thread && _checkpoint_epoch_state != JfrTraceIdEpoch::epoch();
return _service_thread != thread && Atomic::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
}
static const size_t lease_retry = 10;
@ -333,7 +333,19 @@ static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwri
return wo.processed();
}
void JfrCheckpointManager::synchronize_epoch() {
// First phase of the JFR epoch shift; must run at a safepoint.
// Delegates to JfrTraceIdEpoch to enter the synchronizing state.
void JfrCheckpointManager::begin_epoch_shift() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
JfrTraceIdEpoch::begin_epoch_shift();
}
// Second phase of the JFR epoch shift; must run at a safepoint.
// In debug builds, verifies that the epoch value actually flipped.
void JfrCheckpointManager::end_epoch_shift() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
JfrTraceIdEpoch::end_epoch_shift();
assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
}
void JfrCheckpointManager::synchronize_checkpoint_manager_with_current_epoch() {
assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant");
OrderAccess::storestore();
_checkpoint_epoch_state = JfrTraceIdEpoch::epoch();
@ -341,7 +353,7 @@ void JfrCheckpointManager::synchronize_epoch() {
size_t JfrCheckpointManager::write() {
const size_t processed = write_mspace<MutexedWriteOp, CompositeOperation>(_free_list_mspace, _chunkwriter);
synchronize_epoch();
synchronize_checkpoint_manager_with_current_epoch();
return processed;
}
@ -361,11 +373,11 @@ size_t JfrCheckpointManager::flush() {
typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
size_t JfrCheckpointManager::clear() {
JfrTypeSet::clear();
clear_type_set();
DiscardOperation discarder(mutexed); // mutexed discard mode
process_free_list(discarder, _free_list_mspace);
process_free_list(discarder, _epoch_transition_mspace);
synchronize_epoch();
synchronize_checkpoint_manager_with_current_epoch();
return discarder.elements();
}
@ -410,18 +422,21 @@ size_t JfrCheckpointManager::write_static_type_set_and_threads() {
return write_epoch_transition_mspace();
}
void JfrCheckpointManager::shift_epoch() {
debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
JfrTraceIdEpoch::shift_epoch();
assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
}
// Chunk-rotation hook; must run at a safepoint. Lets the type manager do its
// rotation work, then wakes up waiting threads.
void JfrCheckpointManager::on_rotation() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
JfrTypeManager::on_rotation();
notify_threads();
}
// Clear the JFR type set. Must NOT be called at a safepoint and only while
// the recorder is stopped; takes the CLD graph and Module locks so the
// artifact walk in JfrTypeSet::clear() sees a stable view.
void JfrCheckpointManager::clear_type_set() {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(!JfrRecorder::is_recording(), "invariant");
// can safepoint here
MutexLocker cld_lock(ClassLoaderDataGraph_lock);
MutexLocker module_lock(Module_lock);
JfrTypeSet::clear();
}
void JfrCheckpointManager::write_type_set() {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
if (LeakProfiler::is_running()) {

View file

@ -85,11 +85,14 @@ class JfrCheckpointManager : public JfrCHeapObj {
size_t write_threads();
size_t write_static_type_set_and_threads();
bool is_type_set_required();
void clear_type_set();
void write_type_set();
static void write_type_set_for_unloaded_classes();
void shift_epoch();
void synchronize_epoch();
void begin_epoch_shift();
void end_epoch_shift();
void synchronize_checkpoint_manager_with_current_epoch();
void notify_threads();
JfrCheckpointManager(JfrChunkWriter& cw);

View file

@ -168,6 +168,7 @@ static void set_serialized(const T* ptr) {
assert(ptr != NULL, "invariant");
SET_SERIALIZED(ptr);
assert(IS_SERIALIZED(ptr), "invariant");
CLEAR_THIS_EPOCH_CLEARED_BIT(ptr);
}
/*
@ -303,7 +304,7 @@ static bool write_klasses() {
_subsystem_callback = &callback;
do_klasses();
} else {
LeakKlassWriter lkw(_leakp_writer, _artifacts, _class_unload);
LeakKlassWriter lkw(_leakp_writer, _class_unload);
CompositeKlassWriter ckw(&lkw, &kw);
CompositeKlassWriterRegistration ckwr(&ckw, &reg);
CompositeKlassCallback callback(&ckwr);
@ -332,6 +333,26 @@ static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
assert(IS_NOT_SERIALIZED(value), "invariant");
}
// Callback type that applies a KlassArtifactRegistrator to each klass.
typedef JfrArtifactCallbackHost<KlassPtr, KlassArtifactRegistrator> RegistrationCallback;

// Per-klass visitor: routes the klass through the previous-epoch artifact
// handler installed in _subsystem_callback.
static void register_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
do_previous_epoch_artifact(_subsystem_callback, klass);
}

// Walk every klass in the ClassLoaderDataGraph with register_klass.
static void do_register_klasses() {
ClassLoaderDataGraph::classes_do(&register_klass);
}

// Register all klasses into _artifacts via a RegistrationCallback.
// Precondition: the artifact set has no klass entries yet.
// NOTE(review): callback is stack-allocated; _subsystem_callback is left
// pointing at it after return — presumably reset by the next caller; verify.
static void register_klasses() {
assert(!_artifacts->has_klass_entries(), "invariant");
KlassArtifactRegistrator reg(_artifacts);
RegistrationCallback callback(&reg);
_subsystem_callback = &callback;
do_register_klasses();
}
static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
assert(writer != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
@ -422,6 +443,15 @@ static void write_packages() {
_artifacts->tally(pw);
}
// Callback type that applies ClearArtifact to each package.
typedef JfrArtifactCallbackHost<PkgPtr, ClearArtifact<PkgPtr> > ClearPackageCallback;

// Clear artifact tags on all packages by walking them with a
// ClearPackageCallback installed as the subsystem callback.
static void clear_packages() {
ClearArtifact<PkgPtr> clear;
ClearPackageCallback callback(&clear);
_subsystem_callback = &callback;
do_packages();
}
static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) {
assert(mod != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
@ -512,6 +542,15 @@ static void write_modules() {
_artifacts->tally(mw);
}
// Callback type that applies ClearArtifact to each module.
typedef JfrArtifactCallbackHost<ModPtr, ClearArtifact<ModPtr> > ClearModuleCallback;

// Clear artifact tags on all modules by walking them with a
// ClearModuleCallback installed as the subsystem callback.
static void clear_modules() {
ClearArtifact<ModPtr> clear;
ClearModuleCallback callback(&clear);
_subsystem_callback = &callback;
do_modules();
}
static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) {
assert(cld != NULL, "invariant");
assert(!cld->is_unsafe_anonymous(), "invariant");
@ -639,6 +678,15 @@ static void write_classloaders() {
_artifacts->tally(cldw);
}
// Callback type that applies ClearArtifact to each class loader data.
typedef JfrArtifactCallbackHost<CldPtr, ClearArtifact<CldPtr> > ClearCLDCallback;

// Clear artifact tags on all class loaders by walking them with a
// ClearCLDCallback installed as the subsystem callback.
static void clear_classloaders() {
ClearArtifact<CldPtr> clear;
ClearCLDCallback callback(&clear);
_subsystem_callback = &callback;
do_class_loaders();
}
// Visibility flag for a method: 1 if the method is hidden, 0 otherwise.
// const_cast is needed because Method::is_hidden() is not const-qualified.
static u1 get_visibility(MethodPtr method) {
assert(method != NULL, "invariant");
return const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0;
}
@ -649,6 +697,7 @@ void set_serialized<Method>(MethodPtr method) {
assert(method != NULL, "invariant");
SET_METHOD_SERIALIZED(method);
assert(IS_METHOD_SERIALIZED(method), "invariant");
CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(method);
}
static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) {
@ -888,24 +937,23 @@ static void write_symbols() {
_artifacts->tally(sw);
}
static bool clear_artifacts = false;
void JfrTypeSet::clear() {
clear_artifacts = true;
}
typedef Wrapper<KlassPtr, ClearArtifact> ClearKlassBits;
typedef Wrapper<MethodPtr, ClearArtifact> ClearMethodFlag;
typedef MethodIteratorHost<ClearMethodFlag, ClearKlassBits, AlwaysTrue, false> ClearKlassAndMethods;
static bool clear_artifacts = false;
// Clear the serialized/used bits on all klasses and their methods by
// iterating the collected artifacts with a ClearKlassAndMethods visitor.
static void clear_klasses_and_methods() {
ClearKlassAndMethods clear(_writer);
_artifacts->iterate_klasses(clear);
}
static size_t teardown() {
assert(_artifacts != NULL, "invariant");
const size_t total_count = _artifacts->total_count();
if (previous_epoch()) {
assert(_writer != NULL, "invariant");
ClearKlassAndMethods clear(_writer);
_artifacts->iterate_klasses(clear);
JfrTypeSet::clear();
clear_klasses_and_methods();
clear_artifacts = true;
++checkpoint_id;
}
return total_count;
@ -945,3 +993,16 @@ size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* l
write_symbols();
return teardown();
}
/**
 * Clear all tags from the previous epoch.
 *
 * Marks the artifact caches for clearing, re-runs setup with no writers
 * (NULL, NULL), then clears each artifact category in turn: klasses,
 * packages, modules, class loaders, and finally klass/method tag bits.
 */
void JfrTypeSet::clear() {
clear_artifacts = true;
setup(NULL, NULL, false, false);
register_klasses();
clear_packages();
clear_modules();
clear_classloaders();
clear_klasses_and_methods();
}

View file

@ -79,9 +79,10 @@ template <typename T>
class ClearArtifact {
public:
bool operator()(T const& value) {
CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
CLEAR_SERIALIZED(value);
assert(IS_NOT_SERIALIZED(value), "invariant");
SET_PREV_EPOCH_CLEARED_BIT(value);
CLEAR_METHOD_AND_CLASS_PREV_EPOCH(value);
return true;
}
};
@ -91,9 +92,10 @@ class ClearArtifact<const Method*> {
public:
bool operator()(const Method* method) {
assert(METHOD_FLAG_USED_PREV_EPOCH(method), "invariant");
CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
CLEAR_METHOD_SERIALIZED(method);
assert(METHOD_NOT_SERIALIZED(method), "invariant");
SET_PREV_EPOCH_METHOD_CLEARED_BIT(method);
CLEAR_METHOD_FLAG_USED_PREV_EPOCH(method);
return true;
}
};

View file

@ -39,10 +39,27 @@
#include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp"
// True when 'value' is not tagged for the current epoch: tests the current
// epoch's in-use bit together with its meta counterpart (shifted by
// META_SHIFT) and requires that exactly the plain in-use bit be set for the
// value to count as tagged.
inline bool is_not_tagged(traceid value) {
const traceid this_epoch_bit = JfrTraceIdEpoch::in_use_this_epoch_bit();
return (value & ((this_epoch_bit << META_SHIFT) | this_epoch_bit)) != this_epoch_bit;
}
// Generic predicate: should artifact 't' be tagged for the current epoch?
// Delegates to is_not_tagged on the raw trace id.
template <typename T>
inline bool should_tag(const T* t) {
assert(t != NULL, "invariant");
return is_not_tagged(TRACE_ID_RAW(t));
}
// Method specialization: methods keep their tag state in trace_flags()
// rather than in a raw trace id.
template <>
inline bool should_tag<Method>(const Method* method) {
assert(method != NULL, "invariant");
return is_not_tagged((traceid)method->trace_flags());
}
template <typename T>
inline traceid set_used_and_get(const T* type) {
assert(type != NULL, "invariant");
if (SHOULD_TAG(type)) {
if (should_tag(type)) {
SET_USED_THIS_EPOCH(type);
JfrTraceIdEpoch::set_changed_tag_state();
}
@ -61,7 +78,13 @@ inline traceid JfrTraceId::get(const Thread* t) {
}
inline traceid JfrTraceId::use(const Klass* klass) {
return set_used_and_get(klass);
assert(klass != NULL, "invariant");
if (should_tag(klass)) {
SET_USED_THIS_EPOCH(klass);
JfrTraceIdEpoch::set_changed_tag_state();
}
assert(USED_THIS_EPOCH(klass), "invariant");
return get(klass);
}
inline traceid JfrTraceId::use(const Method* method) {
@ -71,15 +94,12 @@ inline traceid JfrTraceId::use(const Method* method) {
inline traceid JfrTraceId::use(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
assert(method != NULL, "invariant");
if (SHOULD_TAG_KLASS_METHOD(klass)) {
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
}
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
assert(USED_THIS_EPOCH(klass), "invariant");
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
JfrTraceIdEpoch::set_changed_tag_state();
}
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
return (METHOD_ID(klass, method));
}

View file

@ -26,12 +26,20 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "runtime/safepoint.hpp"
// Alternating epochs on each rotation allow for concurrent tagging.
// The epoch shift happens only during a safepoint.
bool JfrTraceIdEpoch::_epoch_state = false;
bool volatile JfrTraceIdEpoch::_tag_state = false;
bool JfrTraceIdEpoch::_synchronizing = false;
volatile bool JfrTraceIdEpoch::_changed_tag_state = false;
void JfrTraceIdEpoch::shift_epoch() {
void JfrTraceIdEpoch::begin_epoch_shift() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
_epoch_state = !_epoch_state;
_synchronizing = true;
OrderAccess::fence();
}
// Complete the epoch shift begun by begin_epoch_shift(); must run at a
// safepoint while _synchronizing is set. Flips the epoch, then publishes
// the flip with a storestore barrier before clearing _synchronizing.
void JfrTraceIdEpoch::end_epoch_shift() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(_synchronizing, "invariant");
_epoch_state = !_epoch_state;
OrderAccess::storestore();
_synchronizing = false;
}

View file

@ -41,13 +41,33 @@
#define METHOD_AND_CLASS_IN_USE_EPOCH_1_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_1_SHIFT)
#define METHOD_AND_CLASS_IN_USE_EPOCH_2_BITS (METHOD_AND_CLASS_IN_USE_BITS << EPOCH_2_SHIFT)
// Epoch alternation on each rotation allow for concurrent tagging.
// The epoch shift happens only during a safepoint.
//
// _synchronizing is a transition state, the purpose of which is to
// have JavaThreads that run _thread_in_native (i.e. Compiler threads)
// respect the current epoch shift in-progress during a safepoint.
//
// _changed_tag_state == true signals an incremental modification to artifact tagging
// (klasses, methods, CLDs, etc), used to request collection of artifacts.
//
class JfrTraceIdEpoch : AllStatic {
friend class JfrCheckpointManager;
private:
static bool _epoch_state;
static bool volatile _tag_state;
static bool _synchronizing;
static volatile bool _changed_tag_state;
static void shift_epoch();
static void begin_epoch_shift();
static void end_epoch_shift();
// Acquire-load of the changed-tag-state flag.
static bool changed_tag_state() {
return Atomic::load_acquire(&_changed_tag_state);
}

// Release-store of the changed-tag-state flag.
static void set_tag_state(bool value) {
Atomic::release_store(&_changed_tag_state, value);
}
public:
static bool epoch() {
@ -66,6 +86,10 @@ class JfrTraceIdEpoch : AllStatic {
return _epoch_state ? (u1)0 : (u1)1;
}
static bool is_synchronizing() {
return Atomic::load_acquire(&_synchronizing);
}
static traceid in_use_this_epoch_bit() {
return _epoch_state ? USED_EPOCH_2_BIT : USED_EPOCH_1_BIT;
}
@ -91,16 +115,16 @@ class JfrTraceIdEpoch : AllStatic {
}
static bool has_changed_tag_state() {
if (Atomic::load_acquire(&_tag_state)) {
Atomic::release_store(&_tag_state, false);
if (changed_tag_state()) {
set_tag_state(false);
return true;
}
return false;
}
static void set_changed_tag_state() {
if (!Atomic::load_acquire(&_tag_state)) {
Atomic::release_store(&_tag_state, true);
if (!changed_tag_state()) {
set_tag_state(true);
}
}
};

View file

@ -44,15 +44,19 @@
// static bits
#define META_SHIFT 8
#define LEAKP_META_BIT USED_BIT
#define EPOCH_1_CLEARED_META_BIT USED_BIT
#define EPOCH_1_CLEARED_BIT (EPOCH_1_CLEARED_META_BIT << META_SHIFT)
#define EPOCH_2_CLEARED_META_BIT (USED_BIT << 1)
#define EPOCH_2_CLEARED_BIT (EPOCH_2_CLEARED_META_BIT << META_SHIFT)
#define LEAKP_META_BIT (USED_BIT << 2)
#define LEAKP_BIT (LEAKP_META_BIT << META_SHIFT)
#define TRANSIENT_META_BIT (USED_BIT << 1)
#define TRANSIENT_META_BIT (USED_BIT << 3)
#define TRANSIENT_BIT (TRANSIENT_META_BIT << META_SHIFT)
#define SERIALIZED_META_BIT (USED_BIT << 2)
#define SERIALIZED_META_BIT (USED_BIT << 4)
#define SERIALIZED_BIT (SERIALIZED_META_BIT << META_SHIFT)
#define TRACE_ID_SHIFT 16
#define METHOD_ID_NUM_MASK ((1 << TRACE_ID_SHIFT) - 1)
#define META_BITS (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT)
#define META_BITS (SERIALIZED_BIT | TRANSIENT_BIT | LEAKP_BIT | EPOCH_2_CLEARED_BIT | EPOCH_1_CLEARED_BIT)
#define EVENT_BITS (EVENT_HOST_KLASS | JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)
#define USED_BITS (METHOD_USED_EPOCH_2_BIT | METHOD_USED_EPOCH_1_BIT | USED_EPOCH_2_BIT | USED_EPOCH_1_BIT)
#define ALL_BITS (META_BITS | EVENT_BITS | USED_BITS)
@ -130,12 +134,16 @@
#define SHOULD_TAG_KLASS_METHOD(ptr) (METHOD_NOT_USED_THIS_EPOCH(ptr))
#define SET_SERIALIZED(ptr) (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
#define CLEAR_SERIALIZED(ptr) (TRACE_ID_META_CLEAR(ptr, META_MASK))
#define SET_PREV_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_TAG(ptr, IN_USE_PREV_EPOCH_BIT))
#define IS_METHOD_SERIALIZED(method) (METHOD_FLAG_PREDICATE(method, SERIALIZED_BIT))
#define IS_METHOD_LEAKP_USED(method) (METHOD_FLAG_PREDICATE(method, LEAKP_BIT))
#define METHOD_NOT_SERIALIZED(method) (!(IS_METHOD_SERIALIZED(method)))
#define SET_METHOD_LEAKP(method) (METHOD_META_TAG(method, LEAKP_META_BIT))
#define SET_METHOD_SERIALIZED(method) (METHOD_META_TAG(method, SERIALIZED_META_BIT))
#define CLEAR_METHOD_SERIALIZED(method) (METHOD_META_CLEAR(method, META_MASK))
#define SET_PREV_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_TAG(ptr, IN_USE_PREV_EPOCH_BIT))
#define CLEAR_LEAKP(ptr) (TRACE_ID_META_CLEAR(ptr, (~(LEAKP_META_BIT))))
#define CLEAR_THIS_EPOCH_CLEARED_BIT(ptr) (TRACE_ID_META_CLEAR(ptr,(~(IN_USE_THIS_EPOCH_BIT))))
#define CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_CLEAR(ptr,(~(IN_USE_THIS_EPOCH_BIT))))
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP

View file

@ -46,9 +46,9 @@
#include "jfr/utilities/jfrTypes.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
@ -347,26 +347,48 @@ JfrRecorderService::JfrRecorderService() :
_storage(JfrStorage::instance()),
_string_pool(JfrStringPool::instance()) {}
static bool recording = false;
enum RecorderState {
STOPPED,
RUNNING
};
static void set_recording_state(bool is_recording) {
static RecorderState recorder_state = STOPPED;
static void set_recorder_state(RecorderState from, RecorderState to) {
assert(from == recorder_state, "invariant");
OrderAccess::storestore();
recording = is_recording;
recorder_state = to;
}
static void start_recorder() {
set_recorder_state(STOPPED, RUNNING);
log_debug(jfr, system)("Recording service STARTED");
}
static void stop_recorder() {
set_recorder_state(RUNNING, STOPPED);
log_debug(jfr, system)("Recording service STOPPED");
}
bool JfrRecorderService::is_recording() {
return recording;
const bool is_running = recorder_state == RUNNING;
OrderAccess::loadload();
return is_running;
}
void JfrRecorderService::start() {
MutexLocker lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
log_debug(jfr, system)("Request to START recording");
assert(!is_recording(), "invariant");
clear();
set_recording_state(true);
assert(is_recording(), "invariant");
open_new_chunk();
log_debug(jfr, system)("Recording STARTED");
start_recorder();
assert(is_recording(), "invariant");
}
static void stop() {
assert(JfrRecorderService::is_recording(), "invariant");
stop_recorder();
assert(!JfrRecorderService::is_recording(), "invariant");
}
void JfrRecorderService::clear() {
@ -390,11 +412,12 @@ void JfrRecorderService::invoke_safepoint_clear() {
void JfrRecorderService::safepoint_clear() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
_checkpoint_manager.begin_epoch_shift();
_string_pool.clear();
_storage.clear();
_checkpoint_manager.shift_epoch();
_chunkwriter.set_time_stamp();
_stack_trace_repository.clear();
_checkpoint_manager.end_epoch_shift();
}
void JfrRecorderService::post_safepoint_clear() {
@ -411,14 +434,6 @@ void JfrRecorderService::open_new_chunk(bool vm_error) {
}
}
static void stop() {
assert(JfrStream_lock->owned_by_self(), "invariant");
assert(JfrRecorderService::is_recording(), "invariant");
log_debug(jfr, system)("Recording STOPPED");
set_recording_state(false);
assert(!JfrRecorderService::is_recording(), "invariant");
}
// 'rotation_safepoint_pending' is currently only relevant in the unusual case of an emergency dump.
// Since the JfrStream_lock must be acquired using _no_safepoint_check,
// if the thread running the emergency dump is a JavaThread, a pending safepoint, induced by rotation,
@ -559,14 +574,15 @@ void JfrRecorderService::safepoint_write() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(is_rotation_safepoint_pending(), "invariant");
set_rotation_safepoint_pending(false);
_checkpoint_manager.begin_epoch_shift();
if (_string_pool.is_modified()) {
write_stringpool_safepoint(_string_pool, _chunkwriter);
}
_checkpoint_manager.on_rotation();
_storage.write_at_safepoint();
_checkpoint_manager.shift_epoch();
_chunkwriter.set_time_stamp();
write_stacktrace(_stack_trace_repository, _chunkwriter, true);
_checkpoint_manager.end_epoch_shift();
}
void JfrRecorderService::post_safepoint_write() {

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/support/jfrEpochSynchronization.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/thread.inline.hpp"
JfrEpochSynchronization::JfrEpochSynchronization() {
if (JfrTraceIdEpoch::is_synchronizing()) {
// only at a safepoint
Thread* thread = Thread::current();
assert(thread != NULL, "invariant");
assert(thread->is_Java_thread(), "invariant");
JavaThread* const jt = (JavaThread*)thread;
assert(jt->thread_state() == _thread_in_native, "invariant");
// use ordinary transition to have the thread block and await the new epoch
ThreadInVMfromNative transition(jt);
}
}

View file

@ -0,0 +1,37 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_SUPPORT_JFREPOCHSYNCHRONIZATION_HPP
#define SHARE_JFR_SUPPORT_JFREPOCHSYNCHRONIZATION_HPP
/*
* JavaThreads running _thread_in_native (Compiler threads) must synchronize
* with the upcoming epoch in case there is an epoch shift in-progress.
*/
class JfrEpochSynchronization {
public:
JfrEpochSynchronization();
};
#endif // SHARE_JFR_SUPPORT_JFREPOCHSYNCHRONIZATION_HPP

View file

@ -348,13 +348,12 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
return true;
}
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
const Type* at = phase->type(n);
assert(at != Type::TOP, "unexpected type");
const TypePtr* atp = at->isa_ptr();
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
if (atp == TypeOopPtr::BOTTOM) {
atp = phase->type(n)->isa_ptr();
}
// adjust atp to be the correct array element address type
atp = atp->add_offset(Type::OffsetBot);
return atp;
return atp->add_offset(Type::OffsetBot);
}
void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
@ -574,8 +573,8 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* src = in(ArrayCopyNode::Src);
Node* dest = in(ArrayCopyNode::Dest);
const TypePtr* atp_src = get_address_type(phase, src);
const TypePtr* atp_dest = get_address_type(phase, dest);
const TypePtr* atp_src = get_address_type(phase, _src_type, src);
const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
Node *in_mem = in(TypeFunc::Memory);
if (!in_mem->is_MergeMem()) {

View file

@ -90,7 +90,7 @@ private:
intptr_t get_length_if_constant(PhaseGVN *phase) const;
int get_count(PhaseGVN *phase) const;
static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);
static const TypePtr* get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n);
Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,

View file

@ -4280,6 +4280,13 @@ Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt*
return phase->transform(new ConvI2LNode(value, ltype));
}
void Compile::print_inlining_stream_free() {
if (_print_inlining_stream != NULL) {
_print_inlining_stream->~stringStream();
_print_inlining_stream = NULL;
}
}
// The message about the current inlining is accumulated in
// _print_inlining_stream and transfered into the _print_inlining_list
// once we know whether inlining succeeds or not. For regular
@ -4290,13 +4297,21 @@ Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt*
// when the inlining is attempted again.
void Compile::print_inlining_init() {
if (print_inlining() || print_intrinsics()) {
// print_inlining_init is actually called several times.
print_inlining_stream_free();
_print_inlining_stream = new stringStream();
// Watch out: The memory initialized by the constructor call PrintInliningBuffer()
// will be copied into the only initial element. The default destructor of
// PrintInliningBuffer will be called when leaving the scope here. If it
// would destuct the enclosed stringStream _print_inlining_list[0]->_ss
// would be destructed, too!
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
}
void Compile::print_inlining_reinit() {
if (print_inlining() || print_intrinsics()) {
print_inlining_stream_free();
// Re allocate buffer when we change ResourceMark
_print_inlining_stream = new stringStream();
}
@ -4310,7 +4325,7 @@ void Compile::print_inlining_commit() {
assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
// Transfer the message from _print_inlining_stream to the current
// _print_inlining_list buffer and clear _print_inlining_stream.
_print_inlining_list->at(_print_inlining_idx).ss()->write(_print_inlining_stream->as_string(), _print_inlining_stream->size());
_print_inlining_list->at(_print_inlining_idx).ss()->write(_print_inlining_stream->base(), _print_inlining_stream->size());
print_inlining_reset();
}
@ -4391,9 +4406,16 @@ void Compile::process_print_inlining() {
if (do_print_inlining) {
ResourceMark rm;
stringStream ss;
assert(_print_inlining_list != NULL, "process_print_inlining should be called only once.");
for (int i = 0; i < _print_inlining_list->length(); i++) {
ss.print("%s", _print_inlining_list->adr_at(i)->ss()->as_string());
_print_inlining_list->at(i).freeStream();
}
// Reset _print_inlining_list, it only contains destructed objects.
// It is on the arena, so it will be freed when the arena is reset.
_print_inlining_list = NULL;
// _print_inlining_stream won't be used anymore, either.
print_inlining_stream_free();
size_t end = ss.size();
_print_inlining_output = NEW_ARENA_ARRAY(comp_arena(), char, end+1);
strncpy(_print_inlining_output, ss.base(), end+1);

View file

@ -512,6 +512,8 @@ class Compile : public Phase {
PrintInliningBuffer()
: _cg(NULL) { _ss = new stringStream(); }
void freeStream() { _ss->~stringStream(); _ss = NULL; }
stringStream* ss() const { return _ss; }
CallGenerator* cg() const { return _cg; }
void set_cg(CallGenerator* cg) { _cg = cg; }
@ -533,6 +535,7 @@ class Compile : public Phase {
void* _replay_inline_data; // Pointer to data loaded from file
void print_inlining_stream_free();
void print_inlining_init();
void print_inlining_reinit();
void print_inlining_commit();

View file

@ -801,7 +801,8 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
if (TraceLoopPredicate) {
predString->print_cr("<u range");
tty->print("%s", predString->as_string());
tty->print("%s", predString->base());
predString->~stringStream();
}
return bol;
}

View file

@ -118,9 +118,20 @@ IfNode* PhaseIdealLoop::find_unswitching_candidate(const IdealLoopTree *loop) co
// execute.
void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
// Find first invariant test that doesn't exit the loop
LoopNode *head = loop->_head->as_Loop();
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
if (find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check) != NULL
|| (UseProfiledLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate) != NULL)
|| (UseLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_predicate) != NULL)) {
assert(entry->is_IfProj(), "sanity - must be ifProj since there is at least one predicate");
if (entry->outcnt() > 1) {
// Bailout if there are loop predicates from which there are additional control dependencies (i.e. from
// loop entry 'entry') to previously partially peeled statements since this case is not handled and can lead
// to wrong execution. Remove this bailout, once this is fixed.
return;
}
}
// Find first invariant test that doesn't exit the loop
IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop);
assert(unswitch_iff != NULL, "should be at least one");
@ -140,7 +151,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
#ifdef ASSERT
Node* uniqc = proj_true->unique_ctrl_out();
Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
Node* predicate = find_predicate(entry);
if (predicate != NULL) {
entry = skip_loop_predicates(entry);
@ -281,123 +292,6 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
_igvn.replace_input_of(slow_l, LoopNode::EntryControl, ifslow_pred);
set_idom(slow_l, ifslow_pred, dom_depth(l));
if (iffast != iffast_pred && entry->outcnt() > 1) {
// This situation occurs when only non-CFG nodes (i.e. no control dependencies between them) with a control
// input from the loop header were partially peeled before (now control dependent on loop entry control).
// If additional CFG nodes were peeled, then the insertion point of the loop predicates from the parsing stage
// would not be found anymore and the predicates not cloned at all (i.e. iffast == iffast_pred) as it happens
// for normal peeling. Those partially peeled statements have a control input from the old loop entry control
// and need to be executed after the predicates. These control dependencies need to be removed from the old
// entry control and added to the new entry control nodes 'iffast_pred' and 'ifslow_pred'. Since each node can
// only have one control input, we need to create clones for all statements (2) that can be reached over a path
// from the old entry control 'entry' (1) to a loop phi (8, 9). The old nodes (2) will be moved to the fast loop and the
// new cloned nodes (10) to the slow loop.
//
// The result of the following algorithm is visualized below. The cloned loop predicates for the fast loop
// are between the loop selection node (3) and the entry control for the fast loop (4) and for the slow loop
// between the loop selection node (3) and the entry control for the slow loop (5), respectively.
//
// 1 entry 1 entry
// / \ |
// 2 stmt 3 iff 3 iff
// | / \ / \
// | .. .. .. ..
// | / \ / \
// | 4 iffast_p 5 ifslow_p 4 iffast_p 5 ifslow_p
// | | | / \ / \
// | 6 head 7 slow_head ==> 6 head 2 stmt 7 slow_head 10 cloned_stmt
// | | | \ / \ /
// +--\ | +--\ | 8 phi 9 phi
// | 8 phi | 9 phi
// | |
// +----------+
//
assert(ifslow != ifslow_pred, "sanity - must also be different");
ResourceMark rm;
Unique_Node_List worklist;
Unique_Node_List phis;
Node_List old_clone;
LoopNode* slow_head = old_new[head->_idx]->as_Loop();
// 1) Do a BFS starting from the outputs of the original entry control node 'entry' to all (loop) phis
// and add the non-phi nodes to the worklist.
// First get all outputs of 'entry' which are not the new "loop selection check" 'iff'.
for (DUIterator_Fast imax, i = entry->fast_outs(imax); i < imax; i++) {
Node* stmt = entry->fast_out(i);
if (stmt != iff) {
assert(!stmt->is_CFG(), "cannot be a CFG node");
worklist.push(stmt);
}
}
// Then do a BFS from all collected nodes so far and stop if a phi node is hit.
// Keep track of them on a separate 'phis' list to adjust their inputs later.
for (uint i = 0; i < worklist.size(); i++) {
Node* stmt = worklist.at(i);
for (DUIterator_Fast jmax, j = stmt->fast_outs(jmax); j < jmax; j++) {
Node* out = stmt->fast_out(j);
assert(!out->is_CFG(), "cannot be a CFG node");
if (out->is_Phi()) {
assert(out->in(PhiNode::Region) == head || out->in(PhiNode::Region) == slow_head,
"phi must be either part of the slow or the fast loop");
phis.push(out);
} else {
worklist.push(out);
}
}
}
// 2) All nodes of interest are in 'worklist' and are now cloned. This could not be done simultaneously
// in step 1 in an easy way because we could have cloned a node which has an input that is added to the
// worklist later. As a result, the BFS would hit a clone which does not need to be cloned again.
// While cloning a node, the control inputs to 'entry' are updated such that the old node points to
// 'iffast_pred' and the clone to 'ifslow_pred', respectively.
for (uint i = 0; i < worklist.size(); i++) {
Node* stmt = worklist.at(i);
assert(!stmt->is_CFG(), "cannot be a CFG node");
Node* cloned_stmt = stmt->clone();
old_clone.map(stmt->_idx, cloned_stmt);
_igvn.register_new_node_with_optimizer(cloned_stmt);
if (stmt->in(0) == entry) {
_igvn.replace_input_of(stmt, 0, iffast_pred);
set_ctrl(stmt, iffast_pred);
_igvn.replace_input_of(cloned_stmt, 0, ifslow_pred);
set_ctrl(cloned_stmt, ifslow_pred);
}
}
// 3) Update the entry control of all collected phi nodes of the slow loop to use the cloned nodes
// instead of the old ones from the worklist
for (uint i = 0; i < phis.size(); i++) {
assert(phis.at(i)->is_Phi(), "must be a phi");
PhiNode* phi = phis.at(i)->as_Phi();
if (phi->in(PhiNode::Region) == slow_head) {
// Slow loop: Update phi entry control to use the cloned version instead of the old one from the worklist
Node* entry_control = phi->in(LoopNode::EntryControl);
_igvn.replace_input_of(phi, LoopNode::EntryControl, old_clone[phi->in(LoopNode::EntryControl)->_idx]);
}
}
// 4) Replace all input edges of cloned nodes from old nodes on the worklist by an input edge from their
// corresponding cloned version.
for (uint i = 0; i < worklist.size(); i++) {
Node* stmt = worklist.at(i);
for (uint j = 0; j < stmt->req(); j++) {
Node* in = stmt->in(j);
if (in == NULL) {
continue;
}
if (worklist.contains(in)) {
// Replace the edge old1->clone_of_old_2 with an edge clone_of_old1->clone_of_old2
old_clone[stmt->_idx]->set_req(j, old_clone[in->_idx]);
}
}
}
}
recompute_dom_depth();
return iffast;

View file

@ -2539,7 +2539,7 @@ MachOper* Matcher::specialize_vector_operand_helper(MachNode* m, MachOper* origi
int size_in_bytes = 4 * type2size[t->basic_type()];
ideal_reg = Matcher::vector_ideal_reg(size_in_bytes);
}
return Matcher::specialize_generic_vector_operand(original_opnd, ideal_reg);
return Matcher::specialize_generic_vector_operand(original_opnd, ideal_reg, false);
}
// Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
@ -2551,7 +2551,7 @@ void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
tmp->_opnds[0] = use->_opnds[0]->clone();
} else {
uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
tmp->_opnds[0] = specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg);
tmp->_opnds[0] = specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true);
}
}

View file

@ -519,7 +519,7 @@ public:
MachOper* specialize_vector_operand(MachNode* m, uint idx);
MachOper* specialize_vector_operand_helper(MachNode* m, MachOper* generic_opnd);
static MachOper* specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg);
static MachOper* specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp);
static bool is_generic_reg2reg_move(MachNode* m);
static bool is_generic_vector(MachOper* opnd);

View file

@ -207,7 +207,7 @@ Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *l
Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
bool is_instance = t_oop->is_known_instance_field();
PhaseIterGVN *igvn = phase->is_IterGVN();
if (is_instance && igvn != NULL && result->is_Phi()) {
if (is_instance && igvn != NULL && result->is_Phi()) {
PhiNode *mphi = result->as_Phi();
assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
const TypePtr *t = mphi->adr_type();

View file

@ -192,7 +192,11 @@ template<class E> class GrowableArray : public GenericGrowableArray {
for (; i < _max; i++) ::new ((void*)&_data[i]) E();
}
GrowableArray(Arena* arena, int initial_size, int initial_len, const E& filler) : GenericGrowableArray(arena, initial_size, initial_len) {
// Watch out, if filler was generated by a constructor, the destuctor might
// be called on the original object invalidating all the copies made here.
// Carefully design the copy constructor.
GrowableArray(Arena* arena, int initial_size, int initial_len, const E& filler) :
GenericGrowableArray(arena, initial_size, initial_len) {
_data = (E*)raw_allocate(sizeof(E));
int i = 0;
for (; i < _len; i++) ::new ((void*)&_data[i]) E(filler);

View file

@ -181,9 +181,9 @@ abstract class AbstractStringBuilder implements Appendable, CharSequence {
}
/**
* Returns the current capacity. The capacity is the amount of storage
* available for newly inserted characters, beyond which an allocation
* will occur.
* Returns the current capacity. The capacity is the number of characters
* that can be stored (including already written characters), beyond which
* an allocation will occur.
*
* @return the current capacity
*/

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,10 +90,15 @@ package java.lang;
@jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.RECORDS,
essentialAPI=true)
public abstract class Record {
/**
* Constructor for record classes to call.
*/
protected Record() {}
/**
* Indicates whether some other object is "equal to" this one. In addition
* to the general contract of {@link Object#equals(Object)},
* record classes must further participate in the invariant that when
* to the general contract of {@link Object#equals(Object) Object.equals},
* record classes must further obey the invariant that when
* a record instance is "copied" by passing the result of the record component
* accessor methods to the canonical constructor, as follows:
* <pre>
@ -102,17 +107,38 @@ public abstract class Record {
* then it must be the case that {@code r.equals(copy)}.
*
* @implSpec
* The implicitly provided implementation returns {@code true} if and
* only if the argument is an instance of the same record type as this object,
* and each component of this record is equal to the corresponding component
* of the argument, according to {@link java.util.Objects#equals(Object,Object)}
* for components whose types are reference types, and according to the semantics
* of the {@code equals} method on the corresponding primitive wrapper type.
* The implicitly provided implementation returns {@code true} if
* and only if the argument is an instance of the same record type
* as this object, and each component of this record is equal to
* the corresponding component of the argument; otherwise, {@code
* false} is returned. Equality of a component {@code c} is
* determined as follows:
* <ul>
*
* <li> If the component is of a reference type, the component is
* considered equal if and only if {@link
* java.util.Objects#equals(Object,Object)
* Objects.equals(this.c(), r.c()} would return {@code true}.
*
* <li> If the component is of a primitive type, using the
* corresponding primitive wrapper class {@code PW} (the
* corresponding wrapper class for {@code int} is {@code
* java.lang.Integer}, and so on), the component is considered
* equal if and only if {@code
* PW.valueOf(this.c()).equals(PW.valueOf(r.c()))} would return
* {@code true}.
*
* </ul>
*
* The implicitly provided implementation conforms to the
* semantics described above; the implementation may or may not
* accomplish this by using calls to the particular methods
* listed.
*
* @see java.util.Objects#equals(Object,Object)
*
* @param obj the reference object with which to compare.
* @return {@code true} if this object is the same as the obj
* @return {@code true} if this object is equal to the
* argument; {@code false} otherwise.
*/
@Override

View file

@ -239,6 +239,9 @@ public class AlgorithmId implements Serializable, DerEncoder {
* return a name such as "MD5withRSA" for a signature algorithm on
* some systems. It also returns names like "OID.1.2.3.4", when
* no particular name for the algorithm is known.
*
* Note: for ecdsa-with-SHA2 plus hash algorithm (Ex: SHA-256), this method
* returns the "full" signature algorithm (Ex: SHA256withECDSA) directly.
*/
public String getName() {
String algName = nameTable.get(algid);
@ -248,7 +251,7 @@ public class AlgorithmId implements Serializable, DerEncoder {
if ((params != null) && algid.equals((Object)specifiedWithECDSA_oid)) {
try {
AlgorithmId paramsId =
AlgorithmId.parse(new DerValue(getEncodedParams()));
AlgorithmId.parse(new DerValue(params.toByteArray()));
String paramsName = paramsId.getName();
algName = makeSigAlg(paramsName, "EC");
} catch (IOException e) {
@ -264,12 +267,18 @@ public class AlgorithmId implements Serializable, DerEncoder {
/**
* Returns the DER encoded parameter, which can then be
* used to initialize java.security.AlgorithmParamters.
* used to initialize java.security.AlgorithmParameters.
*
* Note: for ecdsa-with-SHA2 plus hash algorithm (Ex: SHA-256), this method
* returns null because {@link #getName()} has already returned the "full"
* signature algorithm (Ex: SHA256withECDSA).
*
* @return DER encoded parameters, or null not present.
*/
public byte[] getEncodedParams() throws IOException {
return (params == null) ? null : params.toByteArray();
return (params == null || algid.equals(specifiedWithECDSA_oid))
? null
: params.toByteArray();
}
/**

View file

@ -324,6 +324,7 @@ int getAllInterfacesAndAddresses (JNIEnv *env, netif **netifPP)
goto err;
}
loopif->naddrs += c;
loopif->ipv6Index = ptr->Ipv6IfIndex;
} else {
int index = ptr->IfIndex;
if (index != 0) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1455,7 +1455,7 @@ static void setMulticastInterface(JNIEnv *env, jobject this, int fd, int fd1,
* address is bound to and use the IPV6_MULTICAST_IF
* option instead of IP_MULTICAST_IF
*/
if (ipv6_supported) {
if (ipv6_supported && fd1 >= 0) {
static jclass ni_class = NULL;
if (ni_class == NULL) {
jclass c = (*env)->FindClass(env, "java/net/NetworkInterface");
@ -1496,7 +1496,7 @@ static void setMulticastInterface(JNIEnv *env, jobject this, int fd, int fd1,
* On IPv4 system extract addr[0] and use the IP_MULTICAST_IF
* option. For IPv6 both must be done.
*/
if (ipv6_supported) {
if (ipv6_supported && fd1 >= 0) {
static jfieldID ni_indexID = NULL;
struct in_addr in;
int index;
@ -1508,7 +1508,6 @@ static void setMulticastInterface(JNIEnv *env, jobject this, int fd, int fd1,
CHECK_NULL(ni_indexID);
}
index = (*env)->GetIntField(env, value, ni_indexID);
if (isAdapterIpv6Enabled(env, index) != 0) {
if (setsockopt(fd1, IPPROTO_IPV6, IPV6_MULTICAST_IF,
(const char*)&index, sizeof(index)) < 0) {
@ -1523,16 +1522,18 @@ static void setMulticastInterface(JNIEnv *env, jobject this, int fd, int fd1,
return;
}
}
/* If there are any IPv4 addresses on this interface then
* repeat the operation on the IPv4 fd */
if (fd >= 0) {
/* If there are any IPv4 addresses on this interface then
* repeat the operation on the IPv4 fd */
if (getInet4AddrFromIf(env, value, &in) < 0) {
return;
}
if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
(const char*)&in, sizeof(in)) < 0) {
JNU_ThrowByNameWithMessageAndLastError
(env, JNU_JAVANETPKG "SocketException", "Error setting socket option");
if (getInet4AddrFromIf(env, value, &in) < 0) {
return;
}
if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
(const char*)&in, sizeof(in)) < 0) {
JNU_ThrowByNameWithMessageAndLastError
(env, JNU_JAVANETPKG "SocketException", "Error setting socket option");
}
}
return;
} else {
@ -1877,7 +1878,7 @@ jobject getMulticastInterface(JNIEnv *env, jobject this, int fd, int fd1, jint o
addr = (*env)->GetObjectArrayElement(env, addrArray, 0);
return addr;
} else if (index == 0) { // index == 0 typically means IPv6 not configured on the interfaces
} else if (index == 0 && fd >= 0) {
// falling back to treat interface as configured for IPv4
jobject netObject = NULL;
netObject = getIPv4NetworkInterface(env, this, fd, opt, 0);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View file

@ -520,6 +520,9 @@ static bool read_exec_segments(struct ps_prochandle* ph, ELF_EHDR* exec_ehdr) {
#define LINK_MAP_LD_OFFSET offsetof(struct link_map, l_ld)
#define LINK_MAP_NEXT_OFFSET offsetof(struct link_map, l_next)
#define INVALID_LOAD_ADDRESS -1L
#define ZERO_LOAD_ADDRESS 0x0L
// Calculate the load address of shared library
// on prelink-enabled environment.
//
@ -536,7 +539,7 @@ static uintptr_t calc_prelinked_load_address(struct ps_prochandle* ph, int lib_f
phbuf = read_program_header_table(lib_fd, elf_ehdr);
if (phbuf == NULL) {
print_debug("can't read program header of shared object\n");
return 0L;
return INVALID_LOAD_ADDRESS;
}
// Get the address of .dynamic section from shared library.
@ -552,7 +555,7 @@ static uintptr_t calc_prelinked_load_address(struct ps_prochandle* ph, int lib_f
if (ps_pdread(ph, (psaddr_t)link_map_addr + LINK_MAP_LD_OFFSET,
&lib_ld, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read address of dynamic section in shared object\n");
return 0L;
return INVALID_LOAD_ADDRESS;
}
// Return the load address which is calculated by the address of .dynamic
@ -663,9 +666,9 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
// continue with other libraries...
} else {
if (read_elf_header(lib_fd, &elf_ehdr)) {
if (lib_base_diff == 0x0L) {
if (lib_base_diff == ZERO_LOAD_ADDRESS ) {
lib_base_diff = calc_prelinked_load_address(ph, lib_fd, &elf_ehdr, link_map_addr);
if (lib_base_diff == 0x0L) {
if (lib_base_diff == INVALID_LOAD_ADDRESS) {
close(lib_fd);
return false;
}

View file

@ -25,6 +25,10 @@
package jdk.jfr.internal.consumer;
import jdk.jfr.internal.LogLevel;
import jdk.jfr.internal.LogTag;
import jdk.jfr.internal.Logger;
import jdk.jfr.internal.LongMap;
/**
@ -90,14 +94,14 @@ final class ConstantMap {
return new Reference(this, id);
}
// should always have a value
// should ideally always have a value
Object value = objects.get(id);
if (value == null) {
// unless is 0 which is used to represent null
if (id == 0) {
return null;
// unless id is 0 which is used to represent null
if (id != 0) {
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Missing object id=" + id + " in pool " + name + ". All ids should reference an object");
}
throw new InternalError("Missing object id=" + id + " in pool " + name + ". All ids should reference object");
return null;
}
// id is resolved (but not the whole pool)

View file

@ -0,0 +1,86 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8233164
* @summary Test correct wiring of load/store memory for arraycopy ideal transformation.
* @run main/othervm -XX:CompileCommand=dontinline,compiler.arraycopy.TestArrayCopyMemoryChain::test* -Xbatch
* compiler.arraycopy.TestArrayCopyMemoryChain
*/
package compiler.arraycopy;
/**
 * Regression test for JDK-8233164: verifies correct wiring of load/store
 * memory edges when C2 transforms System.arraycopy() calls. The exact code
 * shape below is what reproduces the bug — do not restructure it.
 */
public class TestArrayCopyMemoryChain {

    // Escaping destination for test1; main() validates its contents.
    private String mySweetEscape1 = null;

    private String getString(int i) {
        return "A" + i + "B";
    }

    // Original test depending on Indify String Concat
    public void test1(int i) {
        mySweetEscape1 = getString(i) + "CD";
    }

    // Escaping destination for test2; main() validates its contents.
    private byte[] mySweetEscape2;

    // Simple holder used to hide the source array behind an object that
    // only Escape Analysis can see through.
    class Wrapper {
        public final byte[] array;

        public Wrapper(byte[] array) {
            this.array = array;
        }
    }

    // Simplified test independent of Strings
    public void test2(int idx, int size) {
        // Create destination array with unknown size and let it escape.
        byte[] dst = new byte[size];
        mySweetEscape2 = dst;
        // Create constant src1 array.
        byte[] src1 = {43, 44};
        // Wrap src2 into an Object such that it's only available after
        // Escape Analysis determined that the Object is non-escaping.
        byte[] array = {42};
        Wrapper wrapper = new Wrapper(array);
        byte[] src2 = wrapper.array;
        // Copy src1 and src2 into the destination array.
        System.arraycopy(src1, 0, dst, idx, src1.length);
        System.arraycopy(src2, 0, dst, 0, src2.length);
    }

    // Runs both tests enough times to trigger JIT compilation (-Xbatch in
    // the test header) and checks the copied results after every iteration.
    public static void main(String[] args) {
        TestArrayCopyMemoryChain t = new TestArrayCopyMemoryChain();
        for (int i = 0; i < 100_000; ++i) {
            t.test1(0);
            if (!t.mySweetEscape1.equals("A0BCD")) {
                throw new RuntimeException("Test1 failed");
            }
            t.test2(1, 3);
            if (t.mySweetEscape2[0] != 42 || t.mySweetEscape2[1] != 43 || t.mySweetEscape2[2] != 44) {
                throw new RuntimeException("Test2 failed");
            }
        }
    }
}

View file

@ -23,11 +23,12 @@
/*
* @test
* @bug 8233033
* @summary Tests if partially peeled statements are not executed before the loop predicates of the unswitched fast loop.
* @bug 8233033 8235984
* @summary Tests if partially peeled statements are not executed before the loop predicates by bailing out of loop unswitching.
*
* @run main/othervm -Xbatch -XX:-TieredCompilation
* @run main/othervm -Xbatch
* -XX:CompileCommand=compileonly,compiler.loopopts.PartialPeelingUnswitch::test*
* -XX:CompileCommand=dontinline,compiler.loopopts.PartialPeelingUnswitch::dontInline
* compiler.loopopts.PartialPeelingUnswitch
*/
@ -38,6 +39,7 @@ public class PartialPeelingUnswitch {
public static int iFld;
public static int x = 42;
public static int y = 31;
public static int z = 22;
public static int[] iArr = new int[10];
public int test() {
@ -46,8 +48,9 @@ public class PartialPeelingUnswitch {
* of the cloned loop predicates for the fast loop (set up at unswitching stage). The only partially peeled
* statement "iFld += 7" was wrongly executed before the predicates (and before the loop itself).
* When hitting the uncommon trap, "iFld >>= 1" was not yet executed. As a result, the interpreter directly
* reexecuted "iFld += 7" again. This resulted in a wrong result for "iFld". The fix makes peeled statements
* control dependent on the cloned loop predicates such that they are executed after them.
* reexecuted "iFld += 7" again. This resulted in a wrong result for "iFld". The fix in 8233033 makes peeled
* statements control dependent on the cloned loop predicates such that they are executed after them. However,
* some cases are not handled properly. For now, the new fix in 8235984 just bails out of loop unswitching.
*/
iFld = 13;
for (int i = 0; i < 8; i++) {
@ -103,16 +106,162 @@ public class PartialPeelingUnswitch {
return iFld + k;
}
// Fuzzer-reduced case for JDK-8235984 (partially peeled statements vs. loop
// predicates). Includes a call to the not-inlined dontInline() after the
// inner loop. The exact statement mix is what triggers the loop-unswitching
// bailout — presumably any reordering invalidates the test; do not restructure.
public int test3() {
    iFld = 13;
    if (z < 34) {
        z = 34;
    }
    for (int i = 0; i < 8; i++) {
        int j = 10;
        while (--j > 0) {
            iFld += -7;
            iArr[5] = 8;
            x = iArr[6];
            y = x;
            for (int k = 50; k < 51; k++) {
                x = iArr[7];
            }
            // (i * 5) + 102 never equals 120 for i in [0, 8), so the early
            // return in case 120 is never taken at runtime.
            switch ((i * 5) + 102) {
                case 120:
                    return iFld;
                case 103:
                    break;
                case 116:
                    break;
                default:
                    if (iFld == -7) {
                        return iFld;
                    }
                    z = iArr[5];
                    iFld >>= 1;
            }
        }
        iArr[5] = 34;
        dontInline(iArr[5]);
    }
    return iFld;
}
// Same shape as test3() but without the dontInline() call after the inner
// loop — a second fuzzer-reduced variant for JDK-8235984. The statement mix
// is what matters to the compiler; do not restructure.
public int test4() {
    iFld = 13;
    if (z < 34) {
        z = 34;
    }
    for (int i = 0; i < 8; i++) {
        int j = 10;
        while (--j > 0) {
            iFld += -7;
            iArr[5] = 8;
            x = iArr[6];
            y = x;
            for (int k = 50; k < 51; k++) {
                x = iArr[7];
            }
            switch ((i * 5) + 102) {
                case 120:
                    return iFld;
                case 103:
                    break;
                case 116:
                    break;
                default:
                    if (iFld == -7) {
                        return iFld;
                    }
                    z = iArr[5];
                    iFld >>= 1;
            }
        }
        iArr[5] = 34;
    }
    return iFld;
}
// Reduced variant of test3()/test4(): no z bookkeeping and a trivial switch
// default. Another shape exercising the JDK-8235984 unswitching bailout;
// keep the statement mix as-is.
public int test5() {
    iFld = 13;
    for (int i = 0; i < 8; i++) {
        int j = 10;
        while (--j > 0) {
            iFld += -7;
            iArr[5] = 8;
            x = iArr[6];
            y = x;
            for (int k = 50; k < 51; k++) {
                x = iArr[7];
            }
            switch ((i * 5) + 102) {
                case 120:
                    return iFld;
                case 103:
                    break;
                case 116:
                    break;
                default:
                    iFld >>= 1;
            }
        }
    }
    return iFld;
}
// Further reduction of test5(): the inner one-iteration k-loop is removed.
// Exercises the JDK-8235984 bailout with yet another loop shape; keep as-is.
public int test6() {
    iFld = 13;
    for (int i = 0; i < 8; i++) {
        int j = 10;
        while (--j > 0) {
            iFld += -7;
            iArr[5] = 8;
            x = iArr[6];
            y = x;
            switch ((i * 5) + 102) {
                case 120:
                    return iFld;
                case 103:
                    break;
                case 116:
                    break;
                default:
                    iFld >>= 1;
            }
        }
    }
    return iFld;
}
// Smallest reduction in the series: only the field update, one array store
// and the switch remain. Keep the statement mix exactly as written.
public int test7() {
    iFld = 13;
    for (int i = 0; i < 8; i++) {
        int j = 10;
        while (--j > 0) {
            iFld += -7;
            iArr[5] = 8;
            switch ((i * 5) + 102) {
                case 120:
                    return iFld;
                case 103:
                    break;
                case 116:
                    break;
                default:
                    iFld >>= 1;
            }
        }
    }
    return iFld;
}
public static void main(String[] strArr) {
PartialPeelingUnswitch _instance = new PartialPeelingUnswitch();
for (int i = 0; i < 200; i++) {
for (int i = 0; i < 2000; i++) {
int result = _instance.test();
if (result != -7) {
throw new RuntimeException("Result should always be -7 but was " + result);
}
}
for (int i = 0; i < 200; i++) {
for (int i = 0; i < 2000; i++) {
int result = _instance.test2();
check(-1, result);
check(-7, iFld);
@ -129,6 +278,22 @@ public class PartialPeelingUnswitch {
x = 42;
y = 31;
}
for (int i = 0; i < 2000; i++) {
_instance.test3();
_instance.test4();
_instance.test5();
_instance.test6();
_instance.test7();
}
for (int i = 0; i < 2000; i++) {
if (i % 2 == 0) {
z = 23;
}
_instance.test3();
_instance.test4();
}
}
public static void check(int expected, int actual) {
@ -136,4 +301,6 @@ public class PartialPeelingUnswitch {
throw new RuntimeException("Wrong result, expected: " + expected + ", actual: " + actual);
}
}
// Intentionally empty; kept out of line by the -XX:CompileCommand=dontinline
// directive in the test header so a real call remains in test3()'s loop body.
public void dontInline(int i) { }
}

View file

@ -0,0 +1,53 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test TieredModesTest
* @summary Check that non-default tiered compilation modes tolerate invalid TieredStopAtLevel values
* @modules java.base/jdk.internal.misc
* java.management
*
* @run main/othervm -XX:+TieredCompilation -XX:CompilationMode=quick-only -XX:TieredStopAtLevel=3
* -XX:CompileCommand=compileonly,compiler.tiered.TieredModesTest::test
* compiler.tiered.TieredModesTest
* @run main/othervm -XX:+TieredCompilation -XX:CompilationMode=high-only -XX:TieredStopAtLevel=3
* -XX:CompileCommand=compileonly,compiler.tiered.TieredModesTest::test
* compiler.tiered.TieredModesTest
* @run main/othervm -XX:+TieredCompilation -XX:CompilationMode=high-only-quick-internal -XX:TieredStopAtLevel=3
* -XX:CompileCommand=compileonly,compiler.tiered.TieredModesTest::test
* compiler.tiered.TieredModesTest
*/
package compiler.tiered;
/**
 * Drives enough invocations of test() that the VM attempts tiered
 * compilation under the non-default CompilationMode / TieredStopAtLevel
 * combinations specified in the test header. The test passes if the VM
 * simply tolerates the (invalid) flag values without failing.
 */
public class TieredModesTest {
    // Mutated by test() so the compiler cannot elide the call entirely.
    public static int sideEffect = 0;

    private static void test() {
        sideEffect++;
    }

    public static void main(String... args) {
        // 100_000 calls — comfortably past the default compile thresholds.
        int remaining = 100_000;
        while (remaining-- > 0) {
            test();
        }
    }
}

View file

@ -45,7 +45,7 @@ public final class MacOSGoToFolderCrash {
Robot robot = new Robot();
robot.setAutoDelay(400);
robot.waitForIdle();
// "⌘+Shift+G" Open "Go To Folder" window
// "CMD+Shift+G" - Open "Go To Folder" window
robot.keyPress(KeyEvent.VK_META);
robot.keyPress(KeyEvent.VK_SHIFT);
robot.keyPress(KeyEvent.VK_G);

View file

@ -0,0 +1,174 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MulticastSocket;
import java.net.NetworkInterface;
import java.util.ArrayList;
import java.util.List;
import jdk.test.lib.NetworkConfiguration;
import jdk.test.lib.net.IPSupport;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import static java.lang.String.format;
import static java.lang.System.out;
import static java.net.StandardSocketOptions.IP_MULTICAST_IF;
import static java.util.stream.Collectors.toList;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
/**
* @test
* @bug 8236441
* @summary Bound MulticastSocket fails when setting outbound interface on Windows
* @library /test/lib
* @run testng IPMulticastIF
* @run testng/othervm -Djava.net.preferIPv4Stack=true IPMulticastIF
* @run testng/othervm -Djava.net.preferIPv6Addresses=true IPMulticastIF
* @run testng/othervm -Djava.net.preferIPv6Addresses=true -Djava.net.preferIPv4Stack=true IPMulticastIF
*/
/**
 * Regression test for JDK-8236441: setting the outgoing multicast interface
 * on a bound MulticastSocket failed on Windows. Exercises both the legacy
 * set/getNetworkInterface methods and the IP_MULTICAST_IF socket option, on
 * bound and unbound sockets.
 */
public class IPMulticastIF {

    // Skip when neither IP version is operational, and print the machine's
    // network configuration to aid diagnosis of environment-dependent failures.
    @BeforeTest
    public void sanity() {
        IPSupport.throwSkippedExceptionIfNonOperational();
        NetworkConfiguration.printSystemConfiguration(out);
    }

    // Cartesian product of {local host, loopback} bind addresses with every
    // multicast-capable interface found on this machine.
    @DataProvider(name = "scenarios")
    public Object[][] positive() throws Exception {
        List<InetAddress> addrs = List.of(InetAddress.getLocalHost(),
                                          InetAddress.getLoopbackAddress());
        List<Object[]> list = new ArrayList<>();
        NetworkConfiguration nc = NetworkConfiguration.probe();
        addrs.stream().forEach(a -> nc.multicastInterfaces(true)
                .map(nif -> new Object[] { new InetSocketAddress(a, 0), nif })
                .forEach(list::add) );
        return list.stream().toArray(Object[][]::new);
    }

    // setNetworkInterface/getNetworkInterface must round-trip on a bound socket.
    @Test(dataProvider = "scenarios")
    public void testSetGetInterfaceBound(InetSocketAddress bindAddr, NetworkInterface nif)
        throws Exception
    {
        out.println(format("\n\n--- testSetGetInterfaceBound bindAddr=[%s], nif=[%s]", bindAddr, nif));
        try (MulticastSocket ms = new MulticastSocket(bindAddr)) {
            ms.setNetworkInterface(nif);
            NetworkInterface msNetIf = ms.getNetworkInterface();
            assertEquals(msNetIf, nif);
        }
    }

    // As above but on an unbound socket (the bind address parameter is unused).
    @Test(dataProvider = "scenarios")
    public void testSetGetInterfaceUnbound(InetSocketAddress ignore, NetworkInterface nif)
        throws Exception
    {
        out.println(format("\n\n--- testSetGetInterfaceUnbound nif=[%s]", nif));
        try (MulticastSocket ms = new MulticastSocket()) {
            ms.setNetworkInterface(nif);
            NetworkInterface msNetIf = ms.getNetworkInterface();
            assertEquals(msNetIf, nif);
        }
    }

    // setOption/getOption(IP_MULTICAST_IF) must round-trip on a bound socket.
    @Test(dataProvider = "scenarios")
    public void testSetGetOptionBound(InetSocketAddress bindAddr, NetworkInterface nif)
        throws Exception
    {
        out.println(format("\n\n--- testSetGetOptionBound bindAddr=[%s], nif=[%s]", bindAddr, nif));
        try (MulticastSocket ms = new MulticastSocket(bindAddr)) {
            ms.setOption(IP_MULTICAST_IF, nif);
            NetworkInterface msNetIf = ms.getOption(IP_MULTICAST_IF);
            assertEquals(msNetIf, nif);
        }
    }

    // As above but on an unbound socket (the bind address parameter is unused).
    @Test(dataProvider = "scenarios")
    public void testSetGetOptionUnbound(InetSocketAddress ignore, NetworkInterface nif)
        throws Exception
    {
        out.println(format("\n\n--- testSetGetOptionUnbound nif=[%s]", nif));
        try (MulticastSocket ms = new MulticastSocket()) {
            ms.setOption(IP_MULTICAST_IF, nif);
            NetworkInterface msNetIf = ms.getOption(IP_MULTICAST_IF);
            assertEquals(msNetIf, nif);
        }
    }

    // -- get without set

    @DataProvider(name = "bindAddresses")
    public Object[][] bindAddresses() throws Exception {
        return new Object[][] {
            { new InetSocketAddress(InetAddress.getLocalHost(), 0) },
            { new InetSocketAddress(InetAddress.getLoopbackAddress(), 0) },
        };
    }

    // Without a prior set, getNetworkInterface() returns a placeholder
    // interface wrapping the any-local address (see assertPlaceHolder).
    @Test(dataProvider = "bindAddresses")
    public void testGetInterfaceBound(InetSocketAddress bindAddr)
        throws Exception
    {
        out.println(format("\n\n--- testGetInterfaceBound bindAddr=[%s]", bindAddr));
        try (MulticastSocket ms = new MulticastSocket(bindAddr)) {
            assertPlaceHolder(ms.getNetworkInterface());
        }
    }

    // NOTE(review): method name has a typo ("Gett"); renaming should be safe
    // since TestNG discovers tests via the @Test annotation — confirm before
    // changing, as the name may appear in test result logs/baselines.
    @Test
    public void testGettInterfaceUnbound() throws Exception {
        out.println("\n\n--- testGettInterfaceUnbound ");
        try (MulticastSocket ms = new MulticastSocket()) {
            assertPlaceHolder(ms.getNetworkInterface());
        }
    }

    // Without a prior set, the IP_MULTICAST_IF option reads back as null.
    @Test(dataProvider = "bindAddresses")
    public void testGetOptionBound(InetSocketAddress bindAddr)
        throws Exception
    {
        out.println(format("\n\n--- testGetOptionBound bindAddr=[%s]", bindAddr));
        try (MulticastSocket ms = new MulticastSocket(bindAddr)) {
            assertEquals(ms.getOption(IP_MULTICAST_IF), null);
        }
    }

    @Test
    public void testGetOptionUnbound() throws Exception {
        out.println("\n\n--- testGetOptionUnbound ");
        try (MulticastSocket ms = new MulticastSocket()) {
            assertEquals(ms.getOption(IP_MULTICAST_IF), null);
        }
    }

    // Asserts that the placeholder NetworkInterface has a single InetAddress
    // that represents the any-local ("wildcard") address.
    static void assertPlaceHolder(NetworkInterface nif) {
        List<InetAddress> addrs = nif.inetAddresses().collect(toList());
        assertEquals(addrs.size(), 1);
        assertTrue(addrs.get(0).isAnyLocalAddress());
    }
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -338,9 +338,12 @@ public class LocaleProviders {
var nfExpectedList = List.of("123", "123.4");
var ifExpectedList = List.of("123", "123");
var defLoc = Locale.getDefault(Locale.Category.FORMAT);
var type = LocaleProviderAdapter.getAdapter(CalendarNameProvider.class, Locale.US)
.getAdapterType();
if (type == LocaleProviderAdapter.Type.HOST && (IS_WINDOWS || IS_MAC)) {
if (defLoc.equals(Locale.US) &&
type == LocaleProviderAdapter.Type.HOST &&
(IS_WINDOWS || IS_MAC)) {
final var numf = NumberFormat.getNumberInstance(Locale.US);
final var intf = NumberFormat.getIntegerInstance(Locale.US);
@ -366,6 +369,7 @@ public class LocaleProviders {
System.out.println("bug8232860Test succeeded.");
} else {
System.out.println("Test ignored. Either :-\n" +
"Default format locale is not Locale.US: " + defLoc + ", or\n" +
"OS is neither macOS/Windows, or\n" +
"provider is not HOST: " + type);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
* @test
* @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440 8008577
* 8010666 8013086 8013233 8013903 8015960 8028771 8054482 8062006
* 8150432 8215913 8220227 8228465 8232871 8232860
* 8150432 8215913 8220227 8228465 8232871 8232860 8236495
* @summary tests for "java.locale.providers" system property
* @library /test/lib
* @build LocaleProviders

View file

@ -83,11 +83,11 @@ public abstract class PKCS11Test {
static {
// hack
String absBase = new File(BASE).getAbsolutePath();
int k = absBase.indexOf(SEP + "test" + SEP + "sun" + SEP);
int k = absBase.indexOf(SEP + "test" + SEP + "jdk" + SEP);
if (k < 0) k = 0;
String p1 = absBase.substring(0, k + 6);
String p2 = absBase.substring(k + 5);
CLOSED_BASE = p1 + "closed" + p2;
String p1 = absBase.substring(0, k);
String p2 = absBase.substring(k);
CLOSED_BASE = p1 + "/../closed" + p2;
// set it as a system property to make it available in policy file
System.setProperty("closed.base", CLOSED_BASE);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it