Mirror of https://github.com/openjdk/jdk.git (synced 2025-09-21 19:44:41 +02:00)

Commit 1a7cfb7023: Merge
37 changed files with 986 additions and 334 deletions
@@ -1302,22 +1302,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   const Register ic_reg = rax;
   const Register receiver = j_rarg0;
-  const Register tmp = rdx;

   Label ok;
   Label exception_pending;

+  assert_different_registers(ic_reg, receiver, rscratch1);
   __ verify_oop(receiver);
-  __ push(tmp); // spill (any other registers free here???)
-  __ load_klass(tmp, receiver);
-  __ cmpq(ic_reg, tmp);
+  __ load_klass(rscratch1, receiver);
+  __ cmpq(ic_reg, rscratch1);
   __ jcc(Assembler::equal, ok);

-  __ pop(tmp);
   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

   __ bind(ok);
-  __ pop(tmp);

   // Verified entry point must be aligned
   __ align(8);
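For context, the code above is the inline-cache check at an nmethod's unverified entry point: the caller passes the expected klass in ic_reg (rax), the stub compares it against the receiver's actual klass, and a mismatch jumps to SharedRuntime::get_ic_miss_stub(); the change simply uses rscratch1 instead of spilling and restoring rdx. A minimal, self-contained C++ sketch of that logic (illustrative names only, not HotSpot code):

#include <cstdio>

struct Klass {};                      // stand-in for a class descriptor
struct Oop { const Klass* klass; };   // stand-in for an object header

// Illustrative only: the real stub tail-jumps to the IC-miss stub so the
// call site can be repatched for the new receiver type.
void ic_miss_stub() { std::puts("inline cache miss: repatch call site"); }
void verified_entry(const Oop*) { std::puts("verified entry"); }

void unverified_entry(const Oop* receiver, const Klass* cached_klass) {
  if (receiver->klass != cached_klass) {   // cmpq ic_reg, rscratch1
    ic_miss_stub();                        // jump to the IC-miss stub
    return;
  }
  verified_entry(receiver);                // falls through to the aligned verified entry
}

int main() {
  Klass k;
  Oop obj = { &k };
  unverified_entry(&obj, &k);   // hit
  unverified_entry(&obj, NULL); // miss
  return 0;
}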
@@ -420,6 +420,13 @@ Form::DataType InstructForm::is_ideal_load() const {
   return _matrule->is_ideal_load();
 }

+// Return 'true' if this instruction matches an ideal 'LoadKlass' node
+bool InstructForm::skip_antidep_check() const {
+  if( _matrule == NULL ) return false;
+
+  return _matrule->skip_antidep_check();
+}
+
 // Return 'true' if this instruction matches an ideal 'Load?' node
 Form::DataType InstructForm::is_ideal_store() const {
   if( _matrule == NULL ) return Form::none;

@@ -567,6 +574,8 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {

 // loads from memory, so must check for anti-dependence
 bool InstructForm::needs_anti_dependence_check(FormDict &globals) const {
+  if ( skip_antidep_check() ) return false;
+
   // Machine independent loads must be checked for anti-dependences
   if( is_ideal_load() != Form::none ) return true;

@@ -3957,6 +3966,28 @@ Form::DataType MatchRule::is_ideal_load() const {
 }

+
+bool MatchRule::skip_antidep_check() const {
+  // Some loads operate on what is effectively immutable memory so we
+  // should skip the anti dep computations. For some of these nodes
+  // the rewritable field keeps the anti dep logic from triggering but
+  // for certain kinds of LoadKlass it does not since they are
+  // actually reading memory which could be rewritten by the runtime,
+  // though never by generated code. This disables it uniformly for
+  // the nodes that behave like this: LoadKlass, LoadNKlass and
+  // LoadRange.
+  if ( _opType && (strcmp(_opType,"Set") == 0) && _rChild ) {
+    const char *opType = _rChild->_opType;
+    if (strcmp("LoadKlass", opType) == 0 ||
+        strcmp("LoadNKlass", opType) == 0 ||
+        strcmp("LoadRange", opType) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+

 Form::DataType MatchRule::is_ideal_store() const {
   Form::DataType ideal_store = Form::none;

@@ -158,6 +158,9 @@ public:

   virtual Form::CallType is_ideal_call() const; // matches ideal 'Call'
   virtual Form::DataType is_ideal_load() const; // node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this Instruct
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   virtual Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
   bool is_ideal_mem() const { return is_ideal_load() != Form::none || is_ideal_store() != Form::none; }
   virtual uint two_address(FormDict &globals); // output reg must match input reg

@@ -1003,6 +1006,9 @@ public:
   bool is_ideal_loopEnd() const; // node matches ideal 'LoopEnd'
   bool is_ideal_bool() const; // node matches ideal 'Bool'
   Form::DataType is_ideal_load() const;// node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this rule
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'

   // Check if 'mRule2' is a cisc-spill variant of this MatchRule

@@ -3231,6 +3231,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);

+    // Set up methodOop::intrinsic_id as soon as we know the names of methods.
+    // (We used to do this lazily, but now we query it in Rewriter,
+    // which is eagerly done for every method, so we might as well do it now,
+    // when everything is fresh in memory.)
+    if (methodOopDesc::klass_id_for_intrinsics(this_klass->as_klassOop()) != vmSymbols::NO_SID) {
+      for (int j = 0; j < methods->length(); j++) {
+        ((methodOop)methods->obj_at(j))->init_intrinsic_id();
+      }
+    }
+
     if (cached_class_file_bytes != NULL) {
       // JVMTI: we have an instanceKlass now, tell it about the cached bytes
       this_klass->set_cached_class_file(cached_class_file_bytes,

@@ -513,9 +513,6 @@
   //
   // for Emacs: (let ((c-backslash-column 120) (c-backslash-max-column 120)) (c-backslash-region (point) (point-max) nil t))
 #define VM_INTRINSICS_DO(do_intrinsic, do_class, do_name, do_signature, do_alias) \
-  do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
-  /* (symbol object_initializer_name defined above) */ \
-  \
   do_intrinsic(_hashCode, java_lang_Object, hashCode_name, void_int_signature, F_R) \
   do_name( hashCode_name, "hashCode") \
   do_intrinsic(_getClass, java_lang_Object, getClass_name, void_class_signature, F_R) \

@@ -635,9 +632,6 @@
   do_intrinsic(_equalsC, java_util_Arrays, equals_name, equalsC_signature, F_S) \
   do_signature(equalsC_signature, "([C[C)Z") \
   \
-  do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
-  /* (symbols invoke_name and invoke_signature defined above) */ \
-  \
   do_intrinsic(_compareTo, java_lang_String, compareTo_name, string_int_signature, F_R) \
   do_name( compareTo_name, "compareTo") \
   do_intrinsic(_indexOf, java_lang_String, indexOf_name, string_int_signature, F_R) \

@@ -656,8 +650,6 @@
   do_name( attemptUpdate_name, "attemptUpdate") \
   do_signature(attemptUpdate_signature, "(JJ)Z") \
   \
-  do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
-  \
   /* support for sun.misc.Unsafe */ \
   do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
   \

@@ -819,10 +811,22 @@
   do_name( prefetchReadStatic_name, "prefetchReadStatic") \
   do_intrinsic(_prefetchWriteStatic, sun_misc_Unsafe, prefetchWriteStatic_name, prefetch_signature, F_SN) \
   do_name( prefetchWriteStatic_name, "prefetchWriteStatic") \
+  /*== LAST_COMPILER_INLINE*/ \
+  /*the compiler does have special inlining code for these; bytecode inline is just fine */ \
+  \
+  do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
+  \
+  do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
+  /* (symbol object_initializer_name defined above) */ \
+  \
+  do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  /* (symbols invoke_name and invoke_signature defined above) */ \
+  \
   /*end*/




 // Class vmSymbols

 class vmSymbols: AllStatic {

@@ -935,6 +939,7 @@ class vmIntrinsics: AllStatic {
     #undef VM_INTRINSIC_ENUM

     ID_LIMIT,
+    LAST_COMPILER_INLINE = _prefetchWriteStatic,
     FIRST_ID = _none + 1
   };

@@ -972,4 +977,7 @@ public:
   static Flags flags_for(ID id);

   static const char* short_name_as_C_string(ID id, char* buf, int size);
+
+  // Access to intrinsic methods:
+  static methodOop method_for(ID id);
 };

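The reordering above relies on a common enum trick: the intrinsics that C2 has special inlining code for stay before the LAST_COMPILER_INLINE marker, while the ones moved after it (_fillInStackTrace, _Object_init, _invoke) remain known to the runtime but are left to ordinary bytecode inlining; the guard `intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE` added to Compile::find_intrinsic later in this commit enforces that. A hedged, self-contained sketch of the idea (the real list is generated by VM_INTRINSICS_DO; these enumerators are illustrative):

#include <cassert>

enum IntrinsicId {
  _none = 0,
  _hashCode,
  _getClass,
  _prefetchWriteStatic,
  LAST_COMPILER_INLINE = _prefetchWriteStatic, // everything above has special compiler inlining
  _fillInStackTrace,                           // runtime-known, bytecode inlining is just fine
  _Object_init,
  _invoke,
  ID_LIMIT
};

bool compiler_should_inline(IntrinsicId id) {
  return id != _none && id <= LAST_COMPILER_INLINE;
}

int main() {
  assert(compiler_should_inline(_hashCode));
  assert(!compiler_should_inline(_fillInStackTrace));
  return 0;
}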
@@ -379,7 +379,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
         if ( loc != NULL ) {
           oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
           oop *derived_loc = loc;
-          derived_oop_fn(base_loc, derived_loc);
+          oop val = *base_loc;
+          if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+            // Ignore NULL oops and decoded NULL narrow oops which
+            // equal to Universe::narrow_oop_base when a narrow oop
+            // implicit null check is used in compiled code.
+            // The narrow_oop_base could be NULL or be the address
+            // of the page below heap depending on compressed oops mode.
+          } else
+            derived_oop_fn(base_loc, derived_loc);
         }
         oms.next();
       } while (!oms.is_done());

@@ -394,6 +402,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
     oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
     if ( loc != NULL ) {
       if ( omv.type() == OopMapValue::oop_value ) {
+        oop val = *loc;
+        if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+          // Ignore NULL oops and decoded NULL narrow oops which
+          // equal to Universe::narrow_oop_base when a narrow oop
+          // implicit null check is used in compiled code.
+          // The narrow_oop_base could be NULL or be the address
+          // of the page below heap depending on compressed oops mode.
+          continue;
+        }
 #ifdef ASSERT
         if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
             !Universe::heap()->is_in_or_null(*loc)) {

@@ -410,6 +427,8 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
 #endif // ASSERT
         oop_fn->do_oop(loc);
       } else if ( omv.type() == OopMapValue::value_value ) {
+        assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc),
+               "found invalid value pointer");
         value_fn->do_oop(loc);
       } else if ( omv.type() == OopMapValue::narrowoop_value ) {
         narrowOop *nl = (narrowOop*)loc;

@@ -233,6 +233,10 @@ class OopMapSet : public ResourceObj {
   int heap_size() const;
   void copy_to(address addr);

+  // Methods oops_do() and all_do() filter out NULL oops and
+  // oop == Universe::narrow_oop_base() before passing oops
+  // to closures.
+
   // Iterates through frame for a compiled method
   static void oops_do (const frame* fr,
                        const RegisterMap* reg_map, OopClosure* f);

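The filtering added above exists because, with compressed oops and a non-zero heap base, a narrow oop value of 0 decodes to the heap base rather than to a real object, so a decoded "NULL" compares equal to Universe::narrow_oop_base(). A small self-contained sketch of that identity (illustrative values, not the HotSpot decode routine):

#include <cassert>
#include <cstdint>

typedef uint32_t narrowOop;

// Illustrative stand-ins for Universe::narrow_oop_base()/narrow_oop_shift().
const uintptr_t heap_base = 0x20000000;
const int       oop_shift = 3;

// Decode: oop = base + (narrow_value << shift).
uintptr_t decode(narrowOop v) { return heap_base + (uintptr_t(v) << oop_shift); }

bool is_narrow_oop_base(uintptr_t p) { return p == heap_base; }

int main() {
  // A compressed NULL (narrow value 0) decodes to the heap base, so a frame
  // walker that only sees the decoded value must treat it like NULL, which is
  // exactly what the checks added to OopMapSet::all_do() do.
  assert(is_narrow_oop_base(decode(0)));
  assert(!is_narrow_oop_base(decode(1)));
  return 0;
}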
@@ -273,6 +273,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
   compute_index_maps();

   if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
+    bool did_rewrite = false;
     int i = _methods->length();
     while (i-- > 0) {
       methodOop method = (methodOop)_methods->obj_at(i);

@@ -281,9 +282,11 @@ Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
         // object for finalization if needed.
         methodHandle m(THREAD, method);
         rewrite_Object_init(m, CHECK);
+        did_rewrite = true;
         break;
       }
     }
+    assert(did_rewrite, "must find Object::<init> to rewrite it");
   }

   // rewrite methods, in two passes

@@ -343,6 +343,7 @@ class Universe: AllStatic {
   // For UseCompressedOops
   static address* narrow_oop_base_addr() { return &_narrow_oop._base; }
   static address narrow_oop_base() { return _narrow_oop._base; }
+  static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
   static int narrow_oop_shift() { return _narrow_oop._shift; }
   static void set_narrow_oop_base(address base) { _narrow_oop._base = base; }
   static void set_narrow_oop_shift(int shift) { _narrow_oop._shift = shift; }

@@ -68,7 +68,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
   m->set_constants(NULL);
   m->set_max_stack(0);
   m->set_max_locals(0);
-  m->clear_intrinsic_id_cache();
+  m->set_intrinsic_id(vmIntrinsics::_none);
   m->set_method_data(NULL);
   m->set_interpreter_throwout_count(0);
   m->set_vtable_index(methodOopDesc::garbage_vtable_index);

@@ -962,26 +962,39 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
   return newm;
 }

-vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
-  assert(vmIntrinsics::_none == 0, "correct coding of default case");
-  const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
-  assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
+vmSymbols::SID methodOopDesc::klass_id_for_intrinsics(klassOop holder) {
   // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
   // because we are not loading from core libraries
-  if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
+  if (instanceKlass::cast(holder)->class_loader() != NULL)
+    return vmSymbols::NO_SID; // regardless of name, no intrinsics here

   // see if the klass name is well-known:
-  symbolOop klass_name = instanceKlass::cast(method_holder())->name();
-  vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
-  if (klass_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+  symbolOop klass_name = instanceKlass::cast(holder)->name();
+  return vmSymbols::find_sid(klass_name);
+}
+
+void methodOopDesc::init_intrinsic_id() {
+  assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
+  const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
+  assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
+
+  // the klass name is well-known:
+  vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
+  assert(klass_id != vmSymbols::NO_SID, "caller responsibility");

   // ditto for method and signature:
   vmSymbols::SID name_id = vmSymbols::find_sid(name());
-  if (name_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+  if (name_id == vmSymbols::NO_SID) return;
   vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
-  if (sig_id == vmSymbols::NO_SID) return vmIntrinsics::_none;
+  if (sig_id == vmSymbols::NO_SID) return;
   jshort flags = access_flags().as_short();

+  vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+  if (id != vmIntrinsics::_none) {
+    set_intrinsic_id(id);
+    return;
+  }
+
   // A few slightly irregular cases:
   switch (klass_id) {
   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):

@@ -992,15 +1005,18 @@ vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
   case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
     // pretend it is the corresponding method in the non-strict class:
     klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
+    id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
     break;
   }
   }

-  // return intrinsic id if any
-  return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+  if (id != vmIntrinsics::_none) {
+    // Set up its iid. It is an alias method.
+    set_intrinsic_id(id);
+    return;
+  }
 }


 // These two methods are static since a GC may move the methodOopDesc
 bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
   bool sig_is_loaded = true;

@@ -104,7 +104,7 @@ class methodOopDesc : public oopDesc {
   u2 _max_stack;                  // Maximum number of entries on the expression stack
   u2 _max_locals;                 // Number of local variables used by this method
   u2 _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
-  u1 _intrinsic_id_cache;         // Cache for intrinsic_id; 0 or 1+vmInt::ID
+  u1 _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
   u1 _highest_tier_compile;       // Highest compile level this method has ever seen.
   u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2 _number_of_breakpoints;      // fullspeed debugging support

@@ -224,8 +224,6 @@ class methodOopDesc : public oopDesc {
   int highest_tier_compile() { return _highest_tier_compile;}
   void set_highest_tier_compile(int level) { _highest_tier_compile = level;}

-  void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
-
   // Count of times method was exited via exception while interpreting
   void interpreter_throwout_increment() {
     if (_interpreter_throwout_count < 65534) {

@@ -571,18 +569,12 @@ class methodOopDesc : public oopDesc {
   void set_cached_itable_index(int index) { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }

   // Support for inlining of intrinsic methods
-  vmIntrinsics::ID intrinsic_id() const { // returns zero if not an intrinsic
-    const u1& cache = _intrinsic_id_cache;
-    if (cache != 0) {
-      return (vmIntrinsics::ID)(cache - 1);
-    } else {
-      vmIntrinsics::ID id = compute_intrinsic_id();
-      *(u1*)&cache = ((u1) id) + 1; // force the cache to be non-const
-      vmIntrinsics::verify_method(id, (methodOop) this);
-      assert((vmIntrinsics::ID)(cache - 1) == id, "proper conversion");
-      return id;
-    }
-  }
+  vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
+  void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
+
+  // Helper routines for intrinsic_id() and vmIntrinsics::method().
+  void init_intrinsic_id(); // updates from _none if a match
+  static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);

   // On-stack replacement support
   bool has_osr_nmethod() { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }

@@ -635,9 +627,6 @@ class methodOopDesc : public oopDesc {
   void set_size_of_parameters(int size) { _size_of_parameters = size; }
  private:

-  // Helper routine for intrinsic_id().
-  vmIntrinsics::ID compute_intrinsic_id() const;
-
   // Inlined elements
   address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
   address* signature_handler_addr() const { return native_function_addr() + 1; }

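Taken together, the methodOop changes replace a lazily filled one-byte cache (0 meaning "not yet computed", 1+id otherwise) with a plain _intrinsic_id field that is seeded with _none at allocation (methodKlass::allocate) and filled in eagerly during class file parsing (ClassFileParser::parseClassFile, earlier in this commit). A self-contained sketch of the resulting shape, with illustrative names rather than the HotSpot types:

#include <cassert>
#include <cstdint>

enum IntrinsicId : uint8_t { _none = 0, _hashCode, _dsqrt, ID_LIMIT };

struct Method {
  uint8_t _intrinsic_id;                // replaces the old _intrinsic_id_cache

  Method() : _intrinsic_id(_none) {}    // allocation seeds _none up front

  // Called once per method right after parsing, when the holder's klass
  // name is known to be "well-known".
  void init_intrinsic_id(IntrinsicId match) {
    assert(_intrinsic_id == _none && "do this just once");
    _intrinsic_id = static_cast<uint8_t>(match);
  }

  // The accessor becomes a plain field read; no lazy computation, no +1 bias.
  IntrinsicId intrinsic_id() const { return static_cast<IntrinsicId>(_intrinsic_id); }
};

int main() {
  Method m;
  m.init_intrinsic_id(_hashCode);
  assert(m.intrinsic_id() == _hashCode);
  return 0;
}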
@@ -1789,15 +1789,19 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 #ifdef _LP64
   // Push DecodeN down through phi.
   // The rest of phi graph will transform by split EncodeP node though phis up.
-  if (UseNewCode && UseCompressedOops && can_reshape && progress == NULL) {
+  if (UseCompressedOops && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
     Node* in_decodeN = NULL;
     for (uint i=1; i<req(); ++i) {// For all paths in
       Node *ii = in(i);
       if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
-        has_decodeN = true;
-        in_decodeN = ii->in(1);
+        // Note: in_decodeN is used only to define the type of new phi.
+        // Find a non dead path otherwise phi type will be wrong.
+        if (ii->in(1)->bottom_type() != Type::TOP) {
+          has_decodeN = true;
+          in_decodeN = ii->in(1);
+        }
       } else if (!ii->is_Phi()) {
         may_push = false;
       }

@@ -1805,7 +1809,6 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {

     if (has_decodeN && may_push) {
       PhaseIterGVN *igvn = phase->is_IterGVN();
-      // Note: in_decodeN is used only to define the type of new phi here.
       PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
       uint orig_cnt = req();
       for (uint i=1; i<req(); ++i) {// For all paths in

@@ -101,7 +101,8 @@ CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
     }
   }
   // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
-  if (m->intrinsic_id() != vmIntrinsics::_none) {
+  if (m->intrinsic_id() != vmIntrinsics::_none &&
+      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
     CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
     if (cg != NULL) {
       // Save it for next time:

@@ -440,6 +441,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     _orig_pc_slot_offset_in_bytes(0),
     _node_bundling_limit(0),
     _node_bundling_base(NULL),
+    _java_calls(0),
+    _inner_loops(0),
 #ifndef PRODUCT
     _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
     _printer(IdealGraphPrinter::printer()),

@@ -710,6 +713,8 @@ Compile::Compile( ciEnv* ci_env,
     _code_buffer("Compile::Fill_buffer"),
     _node_bundling_limit(0),
     _node_bundling_base(NULL),
+    _java_calls(0),
+    _inner_loops(0),
 #ifndef PRODUCT
     _trace_opto_output(TraceOptoOutput),
     _printer(NULL),

@@ -1850,22 +1855,26 @@ struct Final_Reshape_Counts : public StackObj {
   int _float_count; // count float ops requiring 24-bit precision
   int _double_count; // count double ops requiring more precision
   int _java_call_count; // count non-inlined 'java' calls
+  int _inner_loop_count; // count loops which need alignment
   VectorSet _visited; // Visitation flags
   Node_List _tests; // Set of IfNodes & PCTableNodes

   Final_Reshape_Counts() :
-    _call_count(0), _float_count(0), _double_count(0), _java_call_count(0),
+    _call_count(0), _float_count(0), _double_count(0),
+    _java_call_count(0), _inner_loop_count(0),
     _visited( Thread::current()->resource_area() ) { }

   void inc_call_count () { _call_count ++; }
   void inc_float_count () { _float_count ++; }
   void inc_double_count() { _double_count++; }
   void inc_java_call_count() { _java_call_count++; }
+  void inc_inner_loop_count() { _inner_loop_count++; }

   int get_call_count () const { return _call_count ; }
   int get_float_count () const { return _float_count ; }
   int get_double_count() const { return _double_count; }
   int get_java_call_count() const { return _java_call_count; }
+  int get_inner_loop_count() const { return _inner_loop_count; }
 };

 static bool oop_offset_is_sane(const TypeInstPtr* tp) {

@@ -1877,7 +1886,7 @@ static bool oop_offset_is_sane(const TypeInstPtr* tp) {

 //------------------------------final_graph_reshaping_impl----------------------
 // Implement items 1-5 from final_graph_reshaping below.
-static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {

   if ( n->outcnt() == 0 ) return; // dead node
   uint nop = n->Opcode();

@@ -1919,13 +1928,13 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   case Op_CmpF:
   case Op_CmpF3:
   // case Op_ConvL2F: // longs are split into 32-bit halves
-    fpu.inc_float_count();
+    frc.inc_float_count();
     break;

   case Op_ConvF2D:
   case Op_ConvD2F:
-    fpu.inc_float_count();
-    fpu.inc_double_count();
+    frc.inc_float_count();
+    frc.inc_double_count();
     break;

   // Count all double operations that may use FPU

@@ -1942,7 +1951,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   case Op_ConD:
   case Op_CmpD:
   case Op_CmpD3:
-    fpu.inc_double_count();
+    frc.inc_double_count();
     break;
   case Op_Opaque1: // Remove Opaque Nodes before matching
   case Op_Opaque2: // Remove Opaque Nodes before matching

@@ -1951,7 +1960,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   case Op_CallStaticJava:
   case Op_CallJava:
   case Op_CallDynamicJava:
-    fpu.inc_java_call_count(); // Count java call site;
+    frc.inc_java_call_count(); // Count java call site;
   case Op_CallRuntime:
   case Op_CallLeaf:
   case Op_CallLeafNoFP: {

@@ -1962,7 +1971,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
-      fpu.inc_call_count(); // Count the call site
+      frc.inc_call_count(); // Count the call site
     } else { // See if uncommon argument is shared
       Node *n = call->in(TypeFunc::Parms);
       int nop = n->Opcode();

@@ -1983,11 +1992,11 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   case Op_StoreD:
   case Op_LoadD:
   case Op_LoadD_unaligned:
-    fpu.inc_double_count();
+    frc.inc_double_count();
     goto handle_mem;
   case Op_StoreF:
   case Op_LoadF:
-    fpu.inc_float_count();
+    frc.inc_float_count();
     goto handle_mem;

   case Op_StoreB:

@@ -2324,6 +2333,12 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
       n->subsume_by(btp);
     }
     break;
+  case Op_Loop:
+  case Op_CountedLoop:
+    if (n->as_Loop()->is_inner_loop()) {
+      frc.inc_inner_loop_count();
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );

@@ -2332,17 +2347,17 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {

   // Collect CFG split points
   if (n->is_MultiBranch())
-    fpu._tests.push(n);
+    frc._tests.push(n);
 }

 //------------------------------final_graph_reshaping_walk---------------------
 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
 // requires that the walk visits a node's inputs before visiting the node.
-static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
   ResourceArea *area = Thread::current()->resource_area();
   Unique_Node_List sfpt(area);

-  fpu._visited.set(root->_idx); // first, mark node as visited
+  frc._visited.set(root->_idx); // first, mark node as visited
   uint cnt = root->req();
   Node *n = root;
   uint i = 0;

@@ -2351,7 +2366,7 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
       // Place all non-visited non-null inputs onto stack
       Node* m = n->in(i);
       ++i;
-      if (m != NULL && !fpu._visited.test_set(m->_idx)) {
+      if (m != NULL && !frc._visited.test_set(m->_idx)) {
         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
           sfpt.push(m);
         cnt = m->req();

@@ -2361,7 +2376,7 @@ static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Re
       }
     } else {
       // Now do post-visit work
-      final_graph_reshaping_impl( n, fpu );
+      final_graph_reshaping_impl( n, frc );
       if (nstack.is_empty())
         break; // finished
       n = nstack.node(); // Get node from stack

@@ -2442,16 +2457,16 @@ bool Compile::final_graph_reshaping() {
     return true;
   }

-  Final_Reshape_Counts fpu;
+  Final_Reshape_Counts frc;

   // Visit everybody reachable!
   // Allocate stack of size C->unique()/2 to avoid frequent realloc
   Node_Stack nstack(unique() >> 1);
-  final_graph_reshaping_walk(nstack, root(), fpu);
+  final_graph_reshaping_walk(nstack, root(), frc);

   // Check for unreachable (from below) code (i.e., infinite loops).
-  for( uint i = 0; i < fpu._tests.size(); i++ ) {
-    MultiBranchNode *n = fpu._tests[i]->as_MultiBranch();
+  for( uint i = 0; i < frc._tests.size(); i++ ) {
+    MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
     // Get number of CFG targets.
     // Note that PCTables include exception targets after calls.
     uint required_outcnt = n->required_outcnt();

@@ -2497,7 +2512,7 @@ bool Compile::final_graph_reshaping() {
     // Check that I actually visited all kids. Unreached kids
     // must be infinite loops.
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
-      if (!fpu._visited.test(n->fast_out(j)->_idx)) {
+      if (!frc._visited.test(n->fast_out(j)->_idx)) {
        record_method_not_compilable("infinite loop");
        return true; // Found unvisited kid; must be unreach
      }

@@ -2506,13 +2521,14 @@ bool Compile::final_graph_reshaping() {
   // If original bytecodes contained a mixture of floats and doubles
   // check if the optimizer has made it homogenous, item (3).
   if( Use24BitFPMode && Use24BitFP &&
-      fpu.get_float_count() > 32 &&
-      fpu.get_double_count() == 0 &&
-      (10 * fpu.get_call_count() < fpu.get_float_count()) ) {
+      frc.get_float_count() > 32 &&
+      frc.get_double_count() == 0 &&
+      (10 * frc.get_call_count() < frc.get_float_count()) ) {
     set_24_bit_selection_and_mode( false, true );
   }

-  set_has_java_calls(fpu.get_java_call_count() > 0);
+  set_java_calls(frc.get_java_call_count());
+  set_inner_loops(frc.get_inner_loop_count());

   // No infinite loops, no reason to bail out.
   return false;

@@ -223,7 +223,8 @@ class Compile : public Phase {
   PhaseCFG* _cfg; // Results of CFG finding
   bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
   bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results
-  bool _has_java_calls; // True if the method has java calls
+  int _java_calls; // Number of java calls in the method
+  int _inner_loops; // Number of inner loops in the method
   Matcher* _matcher; // Engine to map ideal to machine instructions
   PhaseRegAlloc* _regalloc; // Results of register allocation.
   int _frame_slots; // Size of total frame in stack slots

@@ -505,7 +506,9 @@ class Compile : public Phase {
   PhaseCFG* cfg() { return _cfg; }
   bool select_24_bit_instr() const { return _select_24_bit_instr; }
   bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
-  bool has_java_calls() const { return _has_java_calls; }
+  bool has_java_calls() const { return _java_calls > 0; }
+  int java_calls() const { return _java_calls; }
+  int inner_loops() const { return _inner_loops; }
   Matcher* matcher() { return _matcher; }
   PhaseRegAlloc* regalloc() { return _regalloc; }
   int frame_slots() const { return _frame_slots; }

@@ -532,7 +535,8 @@ class Compile : public Phase {
     _in_24_bit_fp_mode = mode;
   }

-  void set_has_java_calls(bool z) { _has_java_calls = z; }
+  void set_java_calls(int z) { _java_calls = z; }
+  void set_inner_loops(int z) { _inner_loops = z; }

   // Instruction bits passed off to the VM
   int code_size() { return _method_size; }

@@ -578,11 +578,24 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
   if (phi_alias_idx == alias_idx) {
     return orig_phi;
   }
-  // have we already created a Phi for this alias index?
+  // Have we recently created a Phi for this alias index?
   PhiNode *result = get_map_phi(orig_phi->_idx);
   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
     return result;
   }
+  // Previous check may fail when the same wide memory Phi was split into Phis
+  // for different memory slices. Search all Phis for this region.
+  if (result != NULL) {
+    Node* region = orig_phi->in(0);
+    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
+      Node* phi = region->fast_out(i);
+      if (phi->is_Phi() &&
+          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
+        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
+        return phi->as_Phi();
+      }
+    }
+  }
   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
     if (C->do_escape_analysis() == true && !C->failing()) {
       // Retry compilation without escape analysis.

@@ -595,6 +608,7 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
   orig_phi_worklist.append_if_missing(orig_phi);
   const TypePtr *atype = C->get_adr_type(alias_idx);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
+  C->copy_node_notes_to(result, orig_phi);
   set_map_phi(orig_phi->_idx, result);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);

@@ -1373,11 +1373,12 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
   return st;
 }

+
 void GraphKit::pre_barrier(Node* ctl,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
-                           Node *val,
+                           Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
   BarrierSet* bs = Universe::heap()->barrier_set();

@@ -1385,7 +1386,7 @@ void GraphKit::pre_barrier(Node* ctl,
   switch (bs->kind()) {
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
       break;

     case BarrierSet::CardTableModRef:

@@ -1404,8 +1405,8 @@ void GraphKit::post_barrier(Node* ctl,
                             Node* store,
                             Node* obj,
                             Node* adr,
                             uint adr_idx,
-                            Node *val,
+                            Node* val,
                             BasicType bt,
                             bool use_precise) {
   BarrierSet* bs = Universe::heap()->barrier_set();

@@ -1413,7 +1414,7 @@ void GraphKit::post_barrier(Node* ctl,
   switch (bs->kind()) {
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;

     case BarrierSet::CardTableModRef:

@@ -1431,42 +1432,36 @@ void GraphKit::post_barrier(Node* ctl,
   }
 }

-Node* GraphKit::store_oop_to_object(Node* ctl,
-                                    Node* obj,
-                                    Node* adr,
-                                    const TypePtr* adr_type,
-                                    Node *val,
-                                    const TypeOopPtr* val_type,
-                                    BasicType bt) {
+Node* GraphKit::store_oop(Node* ctl,
+                          Node* obj,
+                          Node* adr,
+                          const TypePtr* adr_type,
+                          Node* val,
+                          const TypeOopPtr* val_type,
+                          BasicType bt,
+                          bool use_precise) {
+
+  set_control(ctl);
+  if (stopped()) return top(); // Dead path ?
+
+  assert(bt == T_OBJECT, "sanity");
+  assert(val != NULL, "not dead path");
   uint adr_idx = C->get_alias_index(adr_type);
-  Node* store;
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, false);
-  return store;
-}
-
-Node* GraphKit::store_oop_to_array(Node* ctl,
-                                   Node* obj,
-                                   Node* adr,
-                                   const TypePtr* adr_type,
-                                   Node *val,
-                                   const TypeOopPtr* val_type,
-                                   BasicType bt) {
-  uint adr_idx = C->get_alias_index(adr_type);
-  Node* store;
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
+  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
+
+  pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
+  post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }

+// Could be an array or object we don't know at compile time (unsafe ref.)
 Node* GraphKit::store_oop_to_unknown(Node* ctl,
-                                     Node* obj,
-                                     Node* adr,
+                                     Node* obj,   // containing obj
+                                     Node* adr,   // actual adress to store val at
                                      const TypePtr* adr_type,
-                                     Node *val,
+                                     Node* val,
                                      BasicType bt) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {

@@ -1485,12 +1480,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-
-  uint adr_idx = at->index();
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
-  return store;
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
 }

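The old store_oop_to_object and store_oop_to_array bodies above differed only in the use_precise flag handed to post_barrier, which is why they collapse into a single store_oop entry point, with store_oop_to_unknown reduced to a call into it. A hedged, self-contained sketch of that consolidation (stub functions only, not the real GraphKit API beyond the names shown in the diff):

#include <cstdio>

// Stubs standing in for the barrier and memory helpers used in the diff.
void pre_barrier()      { std::puts("pre_barrier"); }
void store_to_memory()  { std::puts("store"); }
void post_barrier(bool use_precise) {
  std::printf("post_barrier(use_precise=%s)\n", use_precise ? "true" : "false");
}

// One shared implementation; use_precise selects per-element card marks
// (array or unknown targets) versus a single mark covering the whole object.
void store_oop(bool use_precise) {
  pre_barrier();
  store_to_memory();
  post_barrier(use_precise);
}

int main() {
  store_oop(false);  // what store_oop_to_object(...) used to emit
  store_oop(true);   // what store_oop_to_array(...) and store_oop_to_unknown(...) used to emit
  return 0;
}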
@@ -1804,93 +1794,6 @@ Node* GraphKit::just_allocated_object(Node* current_control) {
 }


-//------------------------------store_barrier----------------------------------
-// Insert a write-barrier store. This is to let generational GC work; we have
-// to flag all oop-stores before the next GC point.
-void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr,
-                                  Node* val, bool use_precise) {
-  // No store check needed if we're storing a NULL or an old object
-  // (latter case is probably a string constant). The concurrent
-  // mark sweep garbage collector, however, needs to have all nonNull
-  // oop updates flagged via card-marks.
-  if (val != NULL && val->is_Con()) {
-    // must be either an oop or NULL
-    const Type* t = val->bottom_type();
-    if (t == TypePtr::NULL_PTR || t == Type::TOP)
-      // stores of null never (?) need barriers
-      return;
-    ciObject* con = t->is_oopptr()->const_oop();
-    if (con != NULL
-        && con->is_perm()
-        && Universe::heap()->can_elide_permanent_oop_store_barriers())
-      // no store barrier needed, because no old-to-new ref created
-      return;
-  }
-
-  if (use_ReduceInitialCardMarks()
-      && obj == just_allocated_object(control())) {
-    // We can skip marks on a freshly-allocated object.
-    // Keep this code in sync with do_eager_card_mark in runtime.cpp.
-    // That routine eagerly marks the occasional object which is produced
-    // by the slow path, so that we don't have to do it here.
-    return;
-  }
-
-  if (!use_precise) {
-    // All card marks for a (non-array) instance are in one place:
-    adr = obj;
-  }
-  // (Else it's an array (or unknown), and we want more precise card marks.)
-  assert(adr != NULL, "");
-
-  // Get the alias_index for raw card-mark memory
-  int adr_type = Compile::AliasIdxRaw;
-  // Convert the pointer to an int prior to doing math on it
-  Node* cast = _gvn.transform(new (C, 2) CastP2XNode(control(), adr));
-  // Divide by card size
-  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
-         "Only one we handle so far.");
-  CardTableModRefBS* ct =
-    (CardTableModRefBS*)(Universe::heap()->barrier_set());
-  Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
-  // We store into a byte array, so do not bother to left-shift by zero
-  Node *c = byte_map_base_node();
-  // Combine
-  Node *sb_ctl = control();
-  Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b ));
-  Node *sb_val = _gvn.intcon(0);
-  // Smash zero into card
-  if( !UseConcMarkSweepGC ) {
-    BasicType bt = T_BYTE;
-    store_to_memory(sb_ctl, sb_adr, sb_val, bt, adr_type);
-  } else {
-    // Specialized path for CM store barrier
-    cms_card_mark( sb_ctl, sb_adr, sb_val, oop_store);
-  }
-}
-
-// Specialized path for CMS store barrier
-void GraphKit::cms_card_mark(Node* ctl, Node* adr, Node* val, Node *oop_store) {
-  BasicType bt = T_BYTE;
-  int adr_idx = Compile::AliasIdxRaw;
-  Node* mem = memory(adr_idx);
-
-  // The type input is NULL in PRODUCT builds
-  const TypePtr* type = NULL;
-  debug_only(type = C->get_adr_type(adr_idx));
-
-  // Add required edge to oop_store, optimizer does not support precedence edges.
-  // Convert required edge to precedence edge before allocation.
-  Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) );
-  set_memory(store, adr_idx);
-
-  // For CMS, back-to-back card-marks can only remove the first one
-  // and this requires DU info. Push on worklist for optimizer.
-  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
-    record_for_igvn(store);
-}
-
-
 void GraphKit::round_double_arguments(ciMethod* dest_method) {
   // (Note: TypeFunc::make has a cache that makes this fast.)
   const TypeFunc* tf = TypeFunc::make(dest_method);

@ -3215,6 +3118,79 @@ InitializeNode* AllocateNode::initialization() {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
+//----------------------------- store barriers ----------------------------
+#define __ ideal.
+
+void GraphKit::sync_kit(IdealKit& ideal) {
+  // Final sync IdealKit and graphKit.
+  __ drain_delay_transform();
+  set_all_memory(__ merged_memory());
+  set_control(__ ctrl());
+}
+
+// vanilla/CMS post barrier
+// Insert a write-barrier store.  This is to let generational GC work; we have
+// to flag all oop-stores before the next GC point.
+void GraphKit::write_barrier_post(Node* oop_store,
+                                  Node* obj,
+                                  Node* adr,
+                                  Node* val,
+                                  bool use_precise) {
+  // No store check needed if we're storing a NULL or an old object
+  // (latter case is probably a string constant). The concurrent
+  // mark sweep garbage collector, however, needs to have all nonNull
+  // oop updates flagged via card-marks.
+  if (val != NULL && val->is_Con()) {
+    // must be either an oop or NULL
+    const Type* t = val->bottom_type();
+    if (t == TypePtr::NULL_PTR || t == Type::TOP)
+      // stores of null never (?) need barriers
+      return;
+    ciObject* con = t->is_oopptr()->const_oop();
+    if (con != NULL
+        && con->is_perm()
+        && Universe::heap()->can_elide_permanent_oop_store_barriers())
+      // no store barrier needed, because no old-to-new ref created
+      return;
+  }
+
+  if (!use_precise) {
+    // All card marks for a (non-array) instance are in one place:
+    adr = obj;
+  }
+  // (Else it's an array (or unknown), and we want more precise card marks.)
+  assert(adr != NULL, "");
+
+  IdealKit ideal(gvn(), control(), merged_memory(), true);
+
+  // Convert the pointer to an int prior to doing math on it
+  Node* cast = __ CastPX(__ ctrl(), adr);
+
+  // Divide by card size
+  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+         "Only one we handle so far.");
+  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+
+  // Combine card table base and card offset
+  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
+
+  // Get the alias_index for raw card-mark memory
+  int adr_type = Compile::AliasIdxRaw;
+  // Smash zero into card
+  Node* zero = __ ConI(0);
+  BasicType bt = T_BYTE;
+  if( !UseConcMarkSweepGC ) {
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type);
+  } else {
+    // Specialized path for CM store barrier
+    __ storeCM(__ ctrl(), card_adr, zero, oop_store, bt, adr_type);
+  }
+
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
+}
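The IdealKit form above still reduces to the same card-table arithmetic as the hand-built version it replaces: shift the destination address right by the card-size shift and smash a zero byte into a biased byte map. The following is a minimal standalone C++ sketch of that arithmetic, not HotSpot code; the heap base, card size (512 bytes, shift 9) and the 0xff "clean" value are illustrative assumptions chosen to match the usual CardTableModRefBS layout.

// Standalone sketch of the card-marking arithmetic emitted by the barrier.
// All constants here are illustrative, not taken from a running VM.
#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  const int      card_shift = 9;                // log2 of a 512-byte card
  const uint64_t heap_base  = 0x100000000ULL;   // pretend heap start
  const size_t   heap_size  = 1 << 20;          // 1 MB toy heap
  const size_t   num_cards  = heap_size >> card_shift;

  // byte_map_base is biased so that (addr >> card_shift) indexes it directly.
  uint8_t* byte_map = new uint8_t[num_cards];
  memset(byte_map, 0xff, num_cards);            // 0xff == clean card
  uint8_t* byte_map_base = byte_map - (heap_base >> card_shift);

  // A store to 'addr' dirties exactly one card: smash zero into it.
  uint64_t addr = heap_base + 0x1234;
  byte_map_base[addr >> card_shift] = 0;

  printf("card index %llu dirtied\n",
         (unsigned long long)((addr >> card_shift) - (heap_base >> card_shift)));
  delete[] byte_map;
  return 0;
}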
+// G1 pre/post barriers
 void GraphKit::g1_write_barrier_pre(Node* obj,
                                     Node* adr,
                                     uint alias_idx,
@@ -3222,10 +3198,8 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
                                     const TypeOopPtr* val_type,
                                     BasicType bt) {
   IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-  __ declares_done();
-
-  Node* thread = __ thread();
+
+  Node* tls = __ thread(); // ThreadLocalStorage

   Node* no_ctrl = NULL;
   Node* no_base = __ top();
@@ -3248,9 +3222,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
   // set_control( ctl);

-  Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset));
-  Node* buffer_adr  = __ AddP(no_base, thread, __ ConX(buffer_offset));
-  Node* index_adr   = __ AddP(no_base, thread, __ ConX(index_offset));
+  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
+  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
+  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

   // Now some of the values

@@ -3278,55 +3252,52 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
       Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
       Node* next_indexX = next_index;
 #ifdef _LP64
       // We could refine the type for what it's worth
       // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
       next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
+#endif

       // Now get the buffer location we will log the original value into and store it

       Node *log_addr = __ AddP(no_base, buffer, next_indexX);
-      // __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM));
       __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);

       // update the index
-      // __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
-      // This is a hack to force this store to occur before the oop store that is coming up
-      __ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM));
+      __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);

     } __ else_(); {

       // logging buffer is full, call the runtime
       const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
-      // __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread);
-      __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread);
-    } __ end_if();
-  } __ end_if();
-  } __ end_if();
-
-  __ drain_delay_transform();
-  set_control( __ ctrl());
-  set_all_memory( __ merged_memory());
-
-#undef __
+      __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
+    } __ end_if();  // (!index)
+  } __ end_if();  // (orig != NULL)
+  } __ end_if();  // (!marking)
+
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
 }
 //
 // Update the card table and add card address to the queue
 //
-void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) {
-#define __ ideal->
+void GraphKit::g1_mark_card(IdealKit& ideal,
+                            Node* card_adr,
+                            Node* oop_store,
+                            Node* index,
+                            Node* index_adr,
+                            Node* buffer,
+                            const TypeFunc* tf) {

   Node* zero = __ ConI(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT TO STORE
-  __ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw);
+  __ storeCM(__ ctrl(), card_adr, zero, oop_store, card_bt, Compile::AliasIdxRaw);

   // Now do the queue work
   __ if_then(index, BoolTest::ne, zero); {

     Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
     Node* next_indexX = next_index;
 #ifdef _LP64
     // We could refine the type for what it's worth
@@ -3341,10 +3312,10 @@ void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node*
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
   } __ end_if();
-#undef __
 }

-void GraphKit::g1_write_barrier_post(Node* store,
+void GraphKit::g1_write_barrier_post(Node* oop_store,
                                      Node* obj,
                                      Node* adr,
                                      uint alias_idx,
@@ -3369,10 +3340,8 @@ void GraphKit::g1_write_barrier_post(Node* store,
   assert(adr != NULL, "");

   IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-  __ declares_done();
-
-  Node* thread = __ thread();
+
+  Node* tls = __ thread(); // ThreadLocalStorage

   Node* no_ctrl = NULL;
   Node* no_base = __ top();
@@ -3394,8 +3363,8 @@ void GraphKit::g1_write_barrier_post(Node* store,

   // Pointers into the thread

-  Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
-  Node* index_adr  = __ AddP(no_base, thread, __ ConX(index_offset));
+  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
+  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

   // Now some values

@@ -3404,18 +3373,14 @@ void GraphKit::g1_write_barrier_post(Node* store,

   // Convert the store obj pointer to an int prior to doing math on it
-  // Use addr not obj gets accurate card marks
-
-  // Node* cast = __ CastPX(no_ctrl, adr /* obj */);
-
   // Must use ctrl to prevent "integerized oop" existing across safepoint
-  Node* cast = __ CastPX(__ ctrl(), ( use_precise ? adr : obj ));
+  Node* cast = __ CastPX(__ ctrl(), adr);

   // Divide pointer by card size
   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );

   // Combine card table base and card offset
-  Node *card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
+  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );

   // If we know the value being stored does it cross regions?

@@ -3439,18 +3404,17 @@ void GraphKit::g1_write_barrier_post(Node* store,
         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

         __ if_then(card_val, BoolTest::ne, zero); {
-          g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
+          g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
         } __ end_if();
       } __ end_if();
     } __ end_if();
   } else {
-    g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
+    // Object.clone() instrinsic uses this path.
+    g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
   }

-  __ drain_delay_transform();
-  set_control( __ ctrl());
-  set_all_memory( __ merged_memory());
-
-}
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
+}
 #undef __
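For orientation, the G1 post barrier built by g1_write_barrier_post only marks a card and logs it for refinement when the store actually creates a reference into a different region and the card is not already dirty. A standalone C++ sketch of that filtering follows; it is not HotSpot code, and the region size, card size and queue representation are illustrative assumptions.

// Standalone sketch of the filtering a G1-style post barrier performs
// before it dirties a card and enqueues it. Constants are illustrative.
#include <cstdint>
#include <vector>
#include <cstdio>

const int log_region_bytes = 20;   // pretend 1 MB heap regions
const int card_shift       = 9;    // 512-byte cards

std::vector<uint64_t> dirty_card_queue;   // stands in for the per-thread queue

void post_barrier(uint64_t store_addr, uint64_t new_val, uint8_t* card_table) {
  if (new_val == 0) return;                                       // storing NULL
  if (((store_addr ^ new_val) >> log_region_bytes) == 0) return;  // same region
  uint64_t card = store_addr >> card_shift;
  if (card_table[card] == 0) return;                              // already dirty
  card_table[card] = 0;                                           // dirty the card
  dirty_card_queue.push_back(card);                               // log for refinement
}

int main() {
  std::vector<uint8_t> cards(1 << 12, 0xff);                      // all clean
  post_barrier(0x00100040, 0x00300080, cards.data());             // cross-region: logged
  post_barrier(0x00100040, 0x00100080, cards.data());             // same region: filtered
  printf("queued cards: %zu\n", dirty_card_queue.size());
  return 0;
}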
@@ -449,13 +449,24 @@ class GraphKit : public Phase {
   //
   // If val==NULL, it is taken to be a completely unknown value. QQQ

+  Node* store_oop(Node* ctl,
+                  Node* obj,   // containing obj
+                  Node* adr,   // actual adress to store val at
+                  const TypePtr* adr_type,
+                  Node* val,
+                  const TypeOopPtr* val_type,
+                  BasicType bt,
+                  bool use_precise);
+
   Node* store_oop_to_object(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual adress to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             const TypeOopPtr* val_type,
-                            BasicType bt);
+                            BasicType bt) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
+  }

   Node* store_oop_to_array(Node* ctl,
                            Node* obj,   // containing obj
@@ -463,7 +474,9 @@ class GraphKit : public Phase {
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
-                           BasicType bt);
+                           BasicType bt) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
+  }

   // Could be an array or object we don't know at compile time (unsafe ref.)
   Node* store_oop_to_unknown(Node* ctl,
@@ -488,9 +501,6 @@ class GraphKit : public Phase {
   // Return a load of array element at idx.
   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

-  // CMS card-marks have an input from the corresponding oop_store
-  void cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);
-
   //---------------- Dtrace support --------------------
   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
   void make_dtrace_method_entry(ciMethod* method) {
@@ -582,9 +592,6 @@ class GraphKit : public Phase {
     return C->too_many_recompiles(method(), bci(), reason);
   }

-  // vanilla/CMS post barrier
-  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
-
   // Returns the object (if any) which was created the moment before.
   Node* just_allocated_object(Node* current_control);

@@ -593,6 +600,11 @@ class GraphKit : public Phase {
             && Universe::heap()->can_elide_tlab_store_barriers());
   }

+  void sync_kit(IdealKit& ideal);
+
+  // vanilla/CMS post barrier
+  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
+
   // G1 pre/post barriers
   void g1_write_barrier_pre(Node* obj,
                             Node* adr,
@@ -610,7 +622,7 @@ class GraphKit : public Phase {
                             bool use_precise);
   // Helper function for g1
   private:
-  void g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr,
+  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, Node* index, Node* index_adr,
                     Node* buffer, const TypeFunc* tf);

   public:
@@ -34,7 +34,7 @@
 const uint IdealKit::first_var = TypeFunc::Parms + 1;

 //----------------------------IdealKit-----------------------------------------
-IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms) :
+IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms, bool has_declarations) :
   _gvn(gvn), C(gvn.C) {
   _initial_ctrl = control;
   _initial_memory = mem;
@@ -47,6 +47,9 @@ IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_trans
   _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   _delay_transform  = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
+  if (!has_declarations) {
+    declarations_done();
+  }
 }

 //-------------------------------if_then-------------------------------------
@@ -97,7 +100,7 @@ void IdealKit::else_() {
 //-------------------------------end_if-------------------------------------
 // Merge the "then" and "else" cvstates.
 //
-// The if_then() pushed the current state for later use
+// The if_then() pushed a copy of the current state for later use
 // as the initial state for a future "else" clause.  The
 // current state then became the initial state for the
 // then clause.  If an "else" clause was encountered, it will
@@ -258,8 +261,8 @@ Node* IdealKit::promote_to_phi(Node* n, Node* reg) {
   return delay_transform(PhiNode::make(reg, n, ct));
 }

-//-----------------------------declares_done-----------------------------------
-void IdealKit::declares_done() {
+//-----------------------------declarations_done-------------------------------
+void IdealKit::declarations_done() {
   _cvstate = new_cvstate();   // initialize current cvstate
   set_ctrl(_initial_ctrl);    // initialize control in current cvstate
   set_all_memory(_initial_memory);// initialize memory in current cvstate
@@ -277,7 +280,9 @@ Node* IdealKit::transform(Node* n) {

 //-----------------------------delay_transform-----------------------------------
 Node* IdealKit::delay_transform(Node* n) {
-  gvn().set_type(n, n->bottom_type());
+  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
+    gvn().set_type(n, n->bottom_type());
+  }
   _delay_transform->push(n);
   return n;
 }
@@ -321,7 +326,9 @@ IdealVariable::IdealVariable(IdealKit &k) {
 Node* IdealKit::memory(uint alias_idx) {
   MergeMemNode* mem = merged_memory();
   Node* p = mem->memory_at(alias_idx);
-  _gvn.set_type(p, Type::MEMORY);  // must be mapped
+  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
+    _gvn.set_type(p, Type::MEMORY);  // must be mapped
+  }
   return p;
 }

@@ -462,9 +469,6 @@ void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
   const TypePtr* adr_type = TypeRawPtr::BOTTOM;
   uint adr_idx = C->get_alias_index(adr_type);

-  // Clone initial memory
-  MergeMemNode* cloned_mem = MergeMemNode::make(C, merged_memory());
-
   // Slow-path leaf call
   int size = slow_call_type->domain()->cnt();
   CallNode *call =  (CallNode*)new (C, size) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
@@ -489,9 +493,6 @@ void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,

   set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));

-  // Set the incoming clone of memory as current memory
-  set_all_memory(cloned_mem);
-
   // Make memory for the call
   Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
@@ -49,7 +49,7 @@
 // Example:
 //    Node* limit = ??
 //    IdealVariable i(kit), j(kit);
-//    declares_done();
+//    declarations_done();
 //    Node* exit = make_label(1); // 1 goto
 //    set(j, ConI(0));
 //    loop(i, ConI(0), BoolTest::lt, limit); {
@@ -101,10 +101,7 @@ class IdealKit: public StackObj {
   Node* new_cvstate();                     // Create a new cvstate
   Node* cvstate() { return _cvstate; }     // current cvstate
   Node* copy_cvstate();                    // copy current cvstate
-  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }
-
-  // Should this assert this is a MergeMem???
-  void set_all_memory(Node* mem){ _cvstate->set_req(TypeFunc::Memory, mem); }
   void set_memory(Node* mem, uint alias_idx );
   void do_memory_merge(Node* merging, Node* join);
   void clear(Node* m);                     // clear a cvstate
@@ -132,15 +129,17 @@ class IdealKit: public StackObj {
   Node* memory(uint alias_idx);

  public:
-  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false);
+  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false, bool has_declarations = false);
   ~IdealKit() {
     stop();
     drain_delay_transform();
   }
   // Control
   Node* ctrl() { return _cvstate->in(TypeFunc::Control); }
+  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }
   Node* top() { return C->top(); }
   MergeMemNode* merged_memory() { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
+  void set_all_memory(Node* mem) { _cvstate->set_req(TypeFunc::Memory, mem); }
   void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
   Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); }
   void dead(IdealVariable& v) { set(v, (Node*)NULL); }
@@ -155,7 +154,7 @@ class IdealKit: public StackObj {
   Node* make_label(int goto_ct);
   void bind(Node* lab);
   void goto_(Node* lab, bool bind = false);
-  void declares_done();
+  void declarations_done();
   void drain_delay_transform();

   Node* IfTrue(IfNode* iff)  { return transform(new (C,1) IfTrueNode(iff)); }
@@ -378,7 +378,18 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {

   // Force the original merge dead
   igvn->hash_delete(r);
-  r->set_req_X(0,NULL,igvn);
+  // First, remove region's dead users.
+  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
+    Node* u = r->last_out(l);
+    if( u == r ) {
+      r->set_req(0, NULL);
+    } else {
+      assert(u->outcnt() == 0, "only dead users");
+      igvn->remove_dead_node(u);
+    }
+    l -= 1;
+  }
+  igvn->remove_dead_node(r);

   // Now remove the bogus extra edges used to keep things alive
   igvn->remove_dead_node( hook );
@@ -310,11 +310,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     if (!InlineAtomicLong)  return NULL;
     break;

-  case vmIntrinsics::_Object_init:
-  case vmIntrinsics::_invoke:
-    // We do not intrinsify these; they are marked for other purposes.
-    return NULL;
-
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
@@ -327,6 +322,8 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     break;

   default:
+    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
+    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
     break;
   }

@@ -394,18 +391,11 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   }

   if (PrintIntrinsics) {
-    switch (intrinsic_id()) {
-    case vmIntrinsics::_invoke:
-    case vmIntrinsics::_Object_init:
-      // We do not expect to inline these, so do not produce any noise about them.
-      break;
-    default:
-      tty->print("Did not inline intrinsic %s%s at bci:%d in",
-                 vmIntrinsics::name_at(intrinsic_id()),
-                 (is_virtual() ? " (virtual)" : ""), kit.bci());
-      kit.caller()->print_short_name(tty);
-      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
-    }
+    tty->print("Did not inline intrinsic %s%s at bci:%d in",
+               vmIntrinsics::name_at(intrinsic_id()),
+               (is_virtual() ? " (virtual)" : ""), kit.bci());
+    kit.caller()->print_short_name(tty);
+    tty->print_cr(" (%d bytes)", kit.caller()->code_size());
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
   return NULL;
@@ -1030,7 +1020,7 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

-  IdealKit kit(gvn(), control(), merged_memory());
+  IdealKit kit(gvn(), control(), merged_memory(), false, true);
 #define __ kit.
   Node* zero       = __ ConI(0);
   Node* one        = __ ConI(1);
@@ -1042,7 +1032,7 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
   Node* targetOffset  = __ ConI(targetOffset_i);
   Node* sourceEnd     = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

-  IdealVariable rtn(kit), i(kit), j(kit); __ declares_done();
+  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
   Node* outer_loop = __ make_label(2 /* goto */);
   Node* return_    = __ make_label(1);

@@ -1079,9 +1069,9 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
        __ bind(outer_loop);
   }__ end_loop(); __ dead(i);
   __ bind(return_);
-  __ drain_delay_transform();

-  set_control(__ ctrl());
+  // Final sync IdealKit and GraphKit.
+  sync_kit(kit);
   Node* result = __ value(rtn);
 #undef __
   C->set_has_loops(true);
@@ -2183,14 +2173,23 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
     // of it.  So we need to emit code to conditionally do the proper type of
     // store.

-    IdealKit kit(gvn(), control(),  merged_memory());
-    kit.declares_done();
+    IdealKit ideal(gvn(), control(),  merged_memory());
+#define __ ideal.
     // QQQ who knows what probability is here??
-    kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
-      (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
-    } kit.else_(); {
-      (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
-    } kit.end_if();
+    __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
+      // Sync IdealKit and graphKit.
+      set_all_memory( __ merged_memory());
+      set_control(__ ctrl());
+      Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+      // Update IdealKit memory.
+      __ set_all_memory(merged_memory());
+      __ set_ctrl(control());
+    } __ else_(); {
+      __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
+    } __ end_if();
+    // Final sync IdealKit and GraphKit.
+    sync_kit(ideal);
+#undef __
   }
 }
 }
@@ -346,7 +346,10 @@ Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {

     // Yes!  Reshape address expression!
     Node *inv_scale = new (C, 3) LShiftINode( add_invar, scale );
-    register_new_node( inv_scale, add_invar_ctrl );
+    Node *inv_scale_ctrl =
+      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
+      add_invar_ctrl : scale_ctrl;
+    register_new_node( inv_scale, inv_scale_ctrl );
     Node *var_scale = new (C, 3) LShiftINode( add_var, scale );
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
@@ -300,6 +300,12 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
       }
     }
     adr_type = t_disp->add_offset(offset);
+  } else if( base == NULL &&  offset != 0 && offset != Type::OffsetBot ) {
+    // Use ideal type if it is oop ptr.
+    const TypePtr *tp = oper->type()->isa_ptr();
+    if( tp != NULL) {
+      adr_type = tp;
+    }
   }
 }
@@ -198,14 +198,79 @@ void PhaseMacroExpand::extract_call_projections(CallNode *call) {
 }

 // Eliminate a card mark sequence.  p2x is a ConvP2XNode
-void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
+void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
   assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
-  Node *shift = p2x->unique_out();
-  Node *addp = shift->unique_out();
-  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
-    Node *st = addp->last_out(j);
-    assert(st->is_Store(), "store required");
-    _igvn.replace_node(st, st->in(MemNode::Memory));
+  if (!UseG1GC) {
+    // vanilla/CMS post barrier
+    Node *shift = p2x->unique_out();
+    Node *addp = shift->unique_out();
+    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
+      Node *st = addp->last_out(j);
+      assert(st->is_Store(), "store required");
+      _igvn.replace_node(st, st->in(MemNode::Memory));
+    }
+  } else {
+    // G1 pre/post barriers
+    assert(p2x->outcnt() == 2, "expects 2 users: Xor and URShift nodes");
+    // It could be only one user, URShift node, in Object.clone() instrinsic
+    // but the new allocation is passed to arraycopy stub and it could not
+    // be scalar replaced. So we don't check the case.
+
+    // Remove G1 post barrier.
+
+    // Search for CastP2X->Xor->URShift->Cmp path which
+    // checks if the store done to a different from the value's region.
+    // And replace Cmp with #0 (false) to collapse G1 post barrier.
+    Node* xorx = NULL;
+    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
+      Node* u = p2x->fast_out(i);
+      if (u->Opcode() == Op_XorX) {
+        xorx = u;
+        break;
+      }
+    }
+    assert(xorx != NULL, "missing G1 post barrier");
+    Node* shift = xorx->unique_out();
+    Node* cmpx = shift->unique_out();
+    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
+           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
+           "missing region check in G1 post barrier");
+    _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+
+    // Remove G1 pre barrier.
+
+    // Search "if (marking != 0)" check and set it to "false".
+    Node* this_region = p2x->in(0);
+    assert(this_region != NULL, "");
+    // There is no G1 pre barrier if previous stored value is NULL
+    // (for example, after initialization).
+    if (this_region->is_Region() && this_region->req() == 3) {
+      int ind = 1;
+      if (!this_region->in(ind)->is_IfFalse()) {
+        ind = 2;
+      }
+      if (this_region->in(ind)->is_IfFalse()) {
+        Node* bol = this_region->in(ind)->in(0)->in(1);
+        assert(bol->is_Bool(), "");
+        cmpx = bol->in(1);
+        if (bol->as_Bool()->_test._test == BoolTest::ne &&
+            cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
+            cmpx->in(1)->is_Load()) {
+          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
+          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
+                                              PtrQueue::byte_offset_of_active());
+          if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
+              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+              adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
+            _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+          }
+        }
+      }
+    }
+    // Now CastP2X can be removed since it is used only on dead path
+    // which currently still alive until igvn optimize it.
+    assert(p2x->unique_out()->Opcode() == Op_URShiftX, "");
+    _igvn.replace_node(p2x, top());
   }
 }

@@ -760,14 +825,11 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
         if (n->is_Store()) {
           _igvn.replace_node(n, n->in(MemNode::Memory));
         } else {
-          assert( n->Opcode() == Op_CastP2X, "CastP2X required");
           eliminate_card_mark(n);
         }
         k -= (oc2 - use->outcnt());
       }
     } else {
-      assert( !use->is_SafePoint(), "safepoint uses must have been already elimiated");
-      assert( use->Opcode() == Op_CastP2X, "CastP2X required");
       eliminate_card_mark(use);
     }
     j -= (oc1 - res->outcnt());
@@ -1489,8 +1489,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
 #ifdef ASSERT
   // Verify adr type after matching memory operation
   const MachOper* oper = mach->memory_operand();
-  if (oper != NULL && oper != (MachOper*)-1 &&
-      mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
+  if (oper != NULL && oper != (MachOper*)-1) {
     // It has a unique memory operand.  Find corresponding ideal mem node.
     Node* m = NULL;
     if (leaf->is_Mem()) {
@@ -50,6 +50,13 @@ void Compile::Output() {
   init_scratch_buffer_blob();
   if (failing())  return; // Out of memory

+  // The number of new nodes (mostly MachNop) is proportional to
+  // the number of java calls and inner loops which are aligned.
+  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
+                            C->inner_loops()*(OptoLoopAlignment-1)),
+                           "out of nodes before code generation" ) ) {
+    return;
+  }
   // Make sure I can find the Start Node
   Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];
@@ -1105,7 +1112,7 @@ void Compile::Fill_buffer() {
   uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);

   uint  return_offset = 0;
-  MachNode *nop = new (this) MachNopNode();
+  int nop_size = (new (this) MachNopNode())->size(_regalloc);

   int previous_offset = 0;
   int current_offset  = 0;
@@ -1188,7 +1195,6 @@ void Compile::Fill_buffer() {
       }

       // align the instruction if necessary
-      int nop_size = nop->size(_regalloc);
       int padding = mach->compute_padding(current_offset);
       // Make sure safepoint node for polling is distinct from a call's
       // return by adding a nop if needed.
@@ -1372,7 +1378,6 @@ void Compile::Fill_buffer() {

     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little.  Helps prevent pipe stalls at loop back branches.
-    int nop_size = (new (this) MachNopNode())->size(_regalloc);
     if( i<_cfg->_num_blocks-1 ) {
       Block *nb = _cfg->_blocks[i+1];
       uint padding = nb->alignment_padding(current_offset);
@@ -450,6 +450,8 @@ public:
     subsume_node(old, nn);
   }

+  bool delay_transform() const { return _delay_transform; }
+
   void set_delay_transform(bool delay) {
     _delay_transform = delay;
   }
@@ -1216,6 +1216,8 @@ inline bool Type::is_floatingpoint() const {
 #define Op_AndX      Op_AndL
 #define Op_AddX      Op_AddL
 #define Op_SubX      Op_SubL
+#define Op_XorX      Op_XorL
+#define Op_URShiftX  Op_URShiftL
 // conversions
 #define ConvI2X(x)   ConvI2L(x)
 #define ConvL2X(x)   (x)
@@ -1258,6 +1260,8 @@ inline bool Type::is_floatingpoint() const {
 #define Op_AndX      Op_AndI
 #define Op_AddX      Op_AddI
 #define Op_SubX      Op_SubI
+#define Op_XorX      Op_XorI
+#define Op_URShiftX  Op_URShiftI
 // conversions
 #define ConvI2X(x)   (x)
 #define ConvL2X(x)   ConvL2I(x)
@@ -104,7 +104,17 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
     }
 #endif
     case Location::oop: {
-      Handle h(*(oop *)value_addr); // Wrap a handle around the oop
+      oop val = *(oop *)value_addr;
+#ifdef _LP64
+      if (Universe::is_narrow_oop_base(val)) {
+         // Compiled code may produce decoded oop = narrow_oop_base
+         // when a narrow oop implicit null check is used.
+         // The narrow_oop_base could be NULL or be the address
+         // of the page below heap. Use NULL value for both cases.
+         val = (oop)NULL;
+      }
+#endif
+      Handle h(val); // Wrap a handle around the oop
       return new StackValue(h);
     }
     case Location::addr: {
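The hunk above guards against a "decoded NULL": with compressed oops, decoding a narrow oop of 0 yields the narrow-oop base address rather than a true NULL, and the deoptimization code must not treat that value as a live object. A standalone C++ sketch of the decode arithmetic follows; the base and shift values are illustrative assumptions, since HotSpot chooses them per heap layout.

// Standalone sketch of why a "decoded NULL" equals the narrow-oop base.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t narrow_oop_base  = 0x0000000800000000ULL; // illustrative
  const int      narrow_oop_shift = 3;

  // decode(narrow) = base + (narrow << shift)
  uint32_t narrow_null = 0;
  uint64_t decoded = narrow_oop_base + ((uint64_t)narrow_null << narrow_oop_shift);

  // A narrow-oop implicit null check can leave this value in a register;
  // when rebuilding stack values it must be mapped back to NULL.
  printf("decoded == narrow_oop_base: %s -> use NULL\n",
         (decoded == narrow_oop_base) ? "true" : "false");
  return 0;
}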
75  hotspot/test/compiler/6826736/Test.java  (new file)
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6826736
+ * @summary CMS: core dump with -XX:+UseCompressedOops
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
+ */
+
+public class Test {
+    int[] arr;
+    int[] arr2;
+    int test(int r) {
+        for (int i = 0; i < 100; i++) {
+            for (int j = i; j < 100; j++) {
+               int a = 0;
+               for (long k = 0; k < 100; k++) {
+                  a += k;
+               }
+               if (arr != null)
+                   a = arr[j];
+               r += a;
+            }
+        }
+        return r;
+    }
+
+    public static void main(String[] args) {
+        int r = 0;
+        Test t = new Test();
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[100];
+            r = t.test(r);
+        }
+        System.out.println("Warmup 1 is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = null;
+            r = t.test(r);
+        }
+        System.out.println("Warmup 2 is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[100];
+            r = t.test(r);
+        }
+        System.out.println("Warmup is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[1000000];
+            t.arr = null;
+            r = t.test(r);
+        }
+    }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009 Google Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
124  hotspot/test/compiler/6851282/Test.java  (new file)
@@ -0,0 +1,124 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6851282
 * @summary JIT miscompilation results in null entry in array when using CompressedOops
 *
 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops Test
 */

import java.util.ArrayList;
import java.util.List;

public class Test {
    void foo(A a, A[] as) {
        for (A a1 : as) {
            B[] filtered = a.c(a1);
            for (B b : filtered) {
                if (b == null) {
                    System.out.println("bug: b == null");
                    System.exit(97);
                }
            }
        }
    }

    public static void main(String[] args) {
        List<A> as = new ArrayList<A>();
        for (int i = 0; i < 5000; i++) {
            List<B> bs = new ArrayList<B>();
            for (int j = i; j < i + 1000; j++)
                bs.add(new B(j));
            as.add(new A(bs.toArray(new B[0])));
        }
        new Test().foo(as.get(0), as.subList(1, as.size()).toArray(new A[0]));
    }
}

class A {
    final B[] bs;

    public A(B[] bs) {
        this.bs = bs;
    }

    final B[] c(final A a) {
        return new BoxedArray<B>(bs).filter(new Function<B, Boolean>() {
            public Boolean apply(B arg) {
                for (B b : a.bs) {
                    if (b.d == arg.d)
                        return true;
                }
                return false;
            }
        });
    }
}

class BoxedArray<T> {

    private final T[] array;

    BoxedArray(T[] array) {
        this.array = array;
    }

    public T[] filter(Function<T, Boolean> function) {
        boolean[] include = new boolean[array.length];
        int len = 0;
        int i = 0;
        while (i < array.length) {
            if (function.apply(array[i])) {
                include[i] = true;
                len += 1;
            }
            i += 1;
        }
        T[] result = (T[]) java.lang.reflect.Array.newInstance(array.getClass().getComponentType(), len);
        len = 0;
        i = 0;
        while (len < result.length) {
            if (include[i]) {
                result[len] = array[i];
                len += 1;
            }
            i += 1;
        }
        return result;
    }
}

interface Function<T, R> {
    R apply(T arg);
}

class B {
    final int d;
    public B(int d) {
        this.d = d;
    }
}
68
hotspot/test/compiler/6857159/Test6857159.java
Normal file
@ -0,0 +1,68 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6857159
 * @summary local schedule failed with checkcast of Thread.currentThread()
 *
 * @run shell Test6857159.sh
 */

public class Test6857159 extends Thread {
    static class ct0 extends Test6857159 {
        public void message() {
            // System.out.println("message");
        }

        public void run() {
            message();
            ct0 ct = (ct0) Thread.currentThread();
            ct.message();
        }
    }
    static class ct1 extends ct0 {
        public void message() {
            // System.out.println("message");
        }
    }
    static class ct2 extends ct0 {
        public void message() {
            // System.out.println("message");
        }
    }

    public static void main(String[] args) throws Exception {
        for (int i = 0; i < 100000; i++) {
            Thread t = null;
            switch (i % 3) {
              case 0: t = new ct0(); break;
              case 1: t = new ct1(); break;
              case 2: t = new ct2(); break;
            }
            t.start();
            t.join();
        }
    }
}
65
hotspot/test/compiler/6857159/Test6857159.sh
Normal file
@ -0,0 +1,65 @@
#!/bin/sh
#
# Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#

if [ "${TESTSRC}" = "" ]
then
  echo "TESTSRC not set. Test cannot execute. Failed."
  exit 1
fi
echo "TESTSRC=${TESTSRC}"
if [ "${TESTJAVA}" = "" ]
then
  echo "TESTJAVA not set. Test cannot execute. Failed."
  exit 1
fi
echo "TESTJAVA=${TESTJAVA}"
if [ "${TESTCLASSES}" = "" ]
then
  echo "TESTCLASSES not set. Test cannot execute. Failed."
  exit 1
fi
echo "TESTCLASSES=${TESTCLASSES}"
echo "CLASSPATH=${CLASSPATH}"

set -x

cp ${TESTSRC}/Test6857159.java .
cp ${TESTSRC}/Test6857159.sh .

${TESTJAVA}/bin/javac -d . Test6857159.java

${TESTJAVA}/bin/java ${TESTVMOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1

grep "COMPILE SKIPPED" test.out

result=$?
if [ $result -eq 1 ]
then
  echo "Passed"
  exit 0
else
  echo "Failed"
  exit 1
fi
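Aside, not part of the changeset: the script's verdict is easy to misread, because grep exits with status 0 when it finds "COMPILE SKIPPED" in test.out (the failure case) and 1 when the message is absent (the pass case). A minimal equivalent sketch of that convention, assuming only POSIX grep semantics:

    if grep -q "COMPILE SKIPPED" test.out; then
        # the compiler bailed out on ct0.run, which this test must not allow
        echo "Failed"; exit 1
    else
        echo "Passed"; exit 0
    fi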
51
hotspot/test/compiler/6859338/Test6859338.java
Normal file
@ -0,0 +1,51 @@
/*
 * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6859338
 * @summary Assertion failure in sharedRuntime.cpp
 *
 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338
 */

public class Test6859338 {
    static Object[] o = new Object[] { new Object(), null };
    public static void main(String[] args) {
        int total = 0;
        try {
            // Exercise the implicit null check in the unverified entry point
            for (int i = 0; i < 40000; i++) {
                int limit = o.length;
                if (i < 20000) limit = 1;
                for (int j = 0; j < limit; j++) {
                    total += o[j].hashCode();
                }
            }

        } catch (NullPointerException e) {
            // this is expected. A true failure causes a crash
        }
    }
}
71
hotspot/test/compiler/6860469/Test.java
Normal file
@ -0,0 +1,71 @@
/*
 * Copyright 2009 Google Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/**
 * @test
 * @bug 6860469
 * @summary remix_address_expressions reshapes address expression with bad control
 *
 * @run main/othervm -Xcomp -XX:CompileOnly=Test.C Test
 */

public class Test {

  private static final int H = 16;
  private static final int F = 9;

  static int[] fl = new int[1 << F];

  static int C(int ll, int f) {
    int max = -1;
    int min = H + 1;

    if (ll != 0) {
      if (ll < min) {
        min = ll;
      }
      if (ll > max) {
        max = ll;
      }
    }

    if (f > max) {
      f = max;
    }
    if (min > f) {
      min = f;
    }

    for (int mc = 1 >> max - f; mc <= 0; mc++) {
      int i = mc << (32 - f);
      fl[i] = max;
    }

    return min;
  }

  public static void main(String argv[]) {
    C(0, 10);
  }
}