diff --git a/.hgtags b/.hgtags index dbc6b22ea68..a8e52f397ec 100644 --- a/.hgtags +++ b/.hgtags @@ -116,3 +116,4 @@ d1cf7d4ee16c341f5b8c7e7f1d68a8c412b6c693 jdk7-b137 955488f34ca418f6cdab843d61c20d2c615637d9 jdk7-b139 f4298bc3f4b6baa315643be06966f09684290068 jdk7-b140 5d86d0c7692e8f4a58d430d68c03594e2d3403b3 jdk7-b141 +92bf0655022d4187e9b49c1400f98fb3392a4630 jdk7-b142 diff --git a/.hgtags-top-repo b/.hgtags-top-repo index 6745af50b29..8c8e07b841c 100644 --- a/.hgtags-top-repo +++ b/.hgtags-top-repo @@ -116,3 +116,4 @@ fc47c97bbbd91b1f774d855c48a7e285eb1a351a jdk7-b138 7ed6d0b9aaa12320832a7ddadb88d6d8d0dda4c1 jdk7-b139 dcfe74f1c6553c556e7d361c30b0b614eb5e40f6 jdk7-b140 c6569c5585851dfd39b8de8e021c3c312f51af12 jdk7-b141 +cfbbdb77eac0397b03eb99ee2e07ea00e0a7b81e jdk7-b142 diff --git a/Makefile b/Makefile index c8598161cec..5d0e08d6d15 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,34 @@ BUILD_PARENT_DIRECTORY=. +# Basename of any originally supplied ALT_OUTPUTDIR directory +ifndef ORIG_OUTPUTDIR_BASENAME + ifdef ALT_OUTPUTDIR + ORIG_OUTPUTDIR_BASENAME := $(shell basename $(ALT_OUTPUTDIR)) + else + ORIG_OUTPUTDIR_BASENAME = $(PLATFORM)-$(ARCH) + endif +endif +export ORIG_OUTPUTDIR_BASENAME + +# The three possible directories created for output (3 build flavors) +OUTPUTDIR_BASENAME- = $(ORIG_OUTPUTDIR_BASENAME) +OUTPUTDIR_BASENAME-debug = $(ORIG_OUTPUTDIR_BASENAME)-debug +OUTPUTDIR_BASENAME-fastdebug = $(ORIG_OUTPUTDIR_BASENAME)-fastdebug + +# Relative path to a debug output area +REL_JDK_OUTPUTDIR = ../$(OUTPUTDIR_BASENAME-$(DEBUG_NAME)) + +# The created jdk image directory +JDK_IMAGE_DIRNAME = j2sdk-image +JDK_IMAGE_DIR = $(OUTPUTDIR)/$(JDK_IMAGE_DIRNAME) +ABS_JDK_IMAGE_DIR = $(ABS_OUTPUTDIR)/$(JDK_IMAGE_DIRNAME) + +# Relative path from an output directory to the image directory +REL_JDK_IMAGE_DIR = ../$(OUTPUTDIR_BASENAME-$(DEBUG_NAME))/$(JDK_IMAGE_DIRNAME) +REL_JDK_DEBUG_IMAGE_DIR = ../$(OUTPUTDIR_BASENAME-debug)/$(JDK_IMAGE_DIRNAME) +REL_JDK_FASTDEBUG_IMAGE_DIR = ../$(OUTPUTDIR_BASENAME-fastdebug)/$(JDK_IMAGE_DIRNAME) + ifndef TOPDIR TOPDIR:=. endif @@ -98,8 +126,7 @@ endef # Generic build of basic repo series generic_build_repo_series:: $(SOURCE_TIPS) - $(MKDIR) -p $(OUTPUTDIR) - $(MKDIR) -p $(OUTPUTDIR)/j2sdk-image + $(MKDIR) -p $(JDK_IMAGE_DIR) @$(call StartTimer) ifeq ($(BUILD_LANGTOOLS), true) @@ -146,8 +173,8 @@ generic_build_repo_series:: # # DEBUG_NAME is fastdebug or debug # ALT_OUTPUTDIR is changed to have -debug or -fastdebug suffix -# The resulting j2sdk-image is used by the install makefiles to create a -# debug install bundle jdk-*-debug-** bundle (tar or zip) +# The resulting image directory (j2sdk-image) is used by the install makefiles +# to create a debug install bundle jdk-*-debug-** bundle (tar or zip) # which will install in the debug or fastdebug subdirectory of the # normal product install area. 
# The install process needs to know what the DEBUG_NAME is, so @@ -160,8 +187,8 @@ generic_build_repo_series:: # Location of fresh bootdir output ABS_BOOTDIR_OUTPUTDIR=$(ABS_OUTPUTDIR)/bootjdk -FRESH_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/j2sdk-image -FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME)/j2sdk-image +FRESH_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/$(JDK_IMAGE_DIRNAME) +FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/$(REL_JDK_IMAGE_DIR) create_fresh_product_bootdir: FRC $(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \ @@ -226,7 +253,7 @@ build_product_image: generic_debug_build: $(MAKE) \ - ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME) \ + ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)/$(REL_JDK_OUTPUTDIR) \ DEBUG_NAME=$(DEBUG_NAME) \ GENERATE_DOCS=false \ $(BOOT_CYCLE_DEBUG_SETTINGS) \ @@ -254,8 +281,6 @@ $(SOURCE_TIPS): FRC clobber:: REPORT_BUILD_TIMES= clobber:: $(RM) -r $(OUTPUTDIR)/* - $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-debug/* - $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-fastdebug/* -($(RMDIR) -p $(OUTPUTDIR) > $(DEV_NULL) 2>&1; $(TRUE)) clean: clobber @@ -489,8 +514,8 @@ $(OUTPUTDIR)/test_failures.txt: $(OUTPUTDIR)/test_log.txt # Get log file of all tests run JDK_TO_TEST := $(shell \ - if [ -d "$(ABS_OUTPUTDIR)/j2sdk-image" ] ; then \ - $(ECHO) "$(ABS_OUTPUTDIR)/j2sdk-image"; \ + if [ -d "$(ABS_JDK_IMAGE_DIR)" ] ; then \ + $(ECHO) "$(ABS_JDK_IMAGE_DIR)"; \ elif [ -d "$(ABS_OUTPUTDIR)/bin" ] ; then \ $(ECHO) "$(ABS_OUTPUTDIR)"; \ elif [ "$(PRODUCT_HOME)" != "" -a -d "$(PRODUCT_HOME)/bin" ] ; then \ diff --git a/corba/.hgtags b/corba/.hgtags index 9624f1f3878..2f400d34aab 100644 --- a/corba/.hgtags +++ b/corba/.hgtags @@ -116,3 +116,4 @@ a66c01d8bf895261715955df0b95545c000ed6a8 jdk7-b137 60b074ec6fcf5cdf9efce22fdfb02326ed8fa2d3 jdk7-b139 cdf5d19ec142424489549025e9c42e51f32cf688 jdk7-b140 a58635cdd921bafef353f4864184a0481353197b jdk7-b141 +a2f340a048c88d10cbedc0504f5cf03d39925a40 jdk7-b142 diff --git a/hotspot/.hgtags b/hotspot/.hgtags index 5e01fb71812..8b20d73be2a 100644 --- a/hotspot/.hgtags +++ b/hotspot/.hgtags @@ -170,3 +170,5 @@ d283b82966712b353fa307845a1316da42a355f4 jdk7-b140 d283b82966712b353fa307845a1316da42a355f4 hs21-b10 5d07913abd59261c77f24cc04a759cb75d804099 jdk7-b141 3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11 +9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142 +9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12 diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version index a8dda3ec3ba..452ad051644 100644 --- a/hotspot/make/hotspot_version +++ b/hotspot/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011 HS_MAJOR_VER=21 HS_MINOR_VER=0 -HS_BUILD_NUMBER=12 +HS_BUILD_NUMBER=13 JDK_MAJOR_VER=1 JDK_MINOR_VER=7 diff --git a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp index f3a3f10ea80..46d7d27a259 100644 --- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp @@ -2176,6 +2176,7 @@ int AbstractInterpreter::layout_activation(methodOop method, int tempcount, // Number of slots on java expression stack in use int popframe_extra_args, int moncount, // Number of active monitors + int caller_actual_parameters, int callee_param_size, int callee_locals_size, frame* caller, diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp index d9dd1700ca2..10d4772ab78 100644 --- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp +++ 
b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp @@ -811,7 +811,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const { #ifdef ASSERT #define DESCRIBE_FP_OFFSET(name) \ - values.describe(-1, fp() + frame::name##_offset, #name) + values.describe(frame_no, fp() + frame::name##_offset, #name) void frame::describe_pd(FrameValues& values, int frame_no) { for (int w = 0; w < frame::register_save_words; w++) { diff --git a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp index f90f28873e8..774c3b64d45 100644 --- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp @@ -423,25 +423,6 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) { return true; } -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*)NULL, - (frame*)NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in diff --git a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp index 4e80e1acdda..5f858bd84ee 100644 --- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp @@ -142,18 +142,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* Register O2_form = O2_scratch; Register O3_adapter = O3_scratch; __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); - // load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); - // deal with old JDK versions: - __ add( Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); - __ cmp(O3_adapter, O2_form); - Label sorry_no_invoke_generic; - __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic); - __ delayed()->nop(); - - __ load_heap_oop(Address(O3_adapter, 0), O3_adapter); - __ tst(O3_adapter); - __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic); - __ delayed()->nop(); + __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); + __ verify_oop(O3_adapter); __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize)); // As a trusted first argument, pass the type being called, so the adapter knows // the actual types of the arguments and return values. @@ -164,12 +154,6 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* trace_method_handle(_masm, "invokeGeneric"); __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); - __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available! 
- __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType - // mov(G3_method_handle, G3_method_handle); // already in this register - __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch); - __ delayed()->nop(); - return entry_point; } diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp index ac855dad84a..efc0a46c872 100644 --- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp @@ -1623,6 +1623,7 @@ int AbstractInterpreter::layout_activation(methodOop method, int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_local_count, frame* caller, @@ -1698,21 +1699,35 @@ int AbstractInterpreter::layout_activation(methodOop method, popframe_extra_args; int local_words = method->max_locals() * Interpreter::stackElementWords; + NEEDS_CLEANUP; intptr_t* locals; - if (caller->is_compiled_frame()) { - // Compiled frames do not allocate a varargs area so place them - // next to the register save area. - locals = fp + frame::register_save_words + local_words - 1; - // Caller wants his own SP back - int caller_frame_size = caller->cb()->frame_size(); - *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS; + if (caller->is_interpreted_frame()) { + // Can force the locals area to end up properly overlapping the top of the expression stack. + intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1; + // Note that this computation means we replace size_of_parameters() values from the caller + // interpreter frame's expression stack with our argument locals + int parm_words = caller_actual_parameters * Interpreter::stackElementWords; + locals = Lesp_ptr + parm_words; + int delta = local_words - parm_words; + int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0; + *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS; } else { - assert(caller->is_interpreted_frame() || caller->is_entry_frame(), "only possible cases"); - // The entry and interpreter frames are laid out like normal C - // frames so place the locals adjacent to the varargs area. - locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1; - if (caller->is_interpreted_frame()) { - *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + rounded_cls) - STACK_BIAS; + assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases"); + // Don't have Lesp available; lay out locals block in the caller + // adjacent to the register window save area. + // + // Compiled frames do not allocate a varargs area which is why this if + // statement is needed. 
+ // + if (caller->is_compiled_frame()) { + locals = fp + frame::register_save_words + local_words - 1; + } else { + locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1; + } + if (!caller->is_entry_frame()) { + // Caller wants his own SP back + int caller_frame_size = caller->cb()->frame_size(); + *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS; } } if (TraceDeoptimization) { diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.hpp b/hotspot/src/cpu/x86/vm/assembler_x86.hpp index 34e8ed9c87f..c36709a33df 100644 --- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp +++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp @@ -234,6 +234,20 @@ class Address VALUE_OBJ_CLASS_SPEC { a._disp += disp; return a; } + Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { + Address a = (*this); + a._disp += disp.constant_or_zero() * scale_size(scale); + if (disp.is_register()) { + assert(!a.index()->is_valid(), "competing indexes"); + a._index = disp.as_register(); + a._scale = scale; + } + return a; + } + bool is_same_address(Address a) const { + // disregard _rspec + return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; + } // The following two overloads are used in connection with the // ByteSize type (see sizes.hpp). They simplify the use of @@ -2029,6 +2043,10 @@ class MacroAssembler: public Assembler { void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } void addptr(Register dst, int32_t src); void addptr(Register dst, Register src); + void addptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) addptr(dst, (int) src.as_constant()); + else addptr(dst, src.as_register()); + } void andptr(Register dst, int32_t src); void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } @@ -2090,7 +2108,10 @@ class MacroAssembler: public Assembler { void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } void subptr(Register dst, int32_t src); void subptr(Register dst, Register src); - + void subptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) subptr(dst, (int) src.as_constant()); + else subptr(dst, src.as_register()); + } void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } @@ -2288,6 +2309,11 @@ public: void movptr(Address dst, Register src); + void movptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) movptr(dst, src.as_constant()); + else movptr(dst, src.as_register()); + } + #ifdef _LP64 // Generally the next two are only used for moving NULL // Although there are situations in initializing the mark word where diff --git a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp index edacb282edb..226c6cbc6e6 100644 --- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp +++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp @@ -2339,14 +2339,15 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill, } int AbstractInterpreter::layout_activation(methodOop method, - int tempcount, // - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - frame* caller, - frame* interpreter_frame, - bool is_top_frame) { + int tempcount, // + int popframe_extra_args, + int moncount, + int 
caller_actual_parameters, + int callee_param_count, + int callee_locals, + frame* caller, + frame* interpreter_frame, + bool is_top_frame) { assert(popframe_extra_args == 0, "FIX ME"); // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state() diff --git a/hotspot/src/cpu/x86/vm/frame_x86.cpp b/hotspot/src/cpu/x86/vm/frame_x86.cpp index 8709a0e8698..21705957e50 100644 --- a/hotspot/src/cpu/x86/vm/frame_x86.cpp +++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp @@ -339,7 +339,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const { return fr; } - //------------------------------------------------------------------------------ // frame::verify_deopt_original_pc // @@ -361,6 +360,55 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool } #endif +//------------------------------------------------------------------------------ +// frame::adjust_unextended_sp +void frame::adjust_unextended_sp() { + // If we are returning to a compiled MethodHandle call site, the + // saved_fp will in fact be a saved value of the unextended SP. The + // simplest way to tell whether we are returning to such a call site + // is as follows: + + nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null(); + if (sender_nm != NULL) { + // If the sender PC is a deoptimization point, get the original + // PC. For MethodHandle call site the unextended_sp is stored in + // saved_fp. + if (sender_nm->is_deopt_mh_entry(_pc)) { + DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp)); + _unextended_sp = _fp; + } + else if (sender_nm->is_deopt_entry(_pc)) { + DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp)); + } + else if (sender_nm->is_method_handle_return(_pc)) { + _unextended_sp = _fp; + } + } +} + +//------------------------------------------------------------------------------ +// frame::update_map_with_saved_link +void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) { + // The interpreter and compiler(s) always save EBP/RBP in a known + // location on entry. We must record where that location is + // so this if EBP/RBP was live on callout from c2 we can find + // the saved copy no matter what it called. + + // Since the interpreter always saves EBP/RBP if we record where it is then + // we don't have to always save EBP/RBP on entry and exit to c2 compiled + // code, on entry will be enough. + map->set_location(rbp->as_VMReg(), (address) link_addr); +#ifdef AMD64 + // this is weird "H" ought to be at a higher address however the + // oopMaps seems to have the "H" regs at the same address and the + // vanilla register. + // XXXX make this go away + if (true) { + map->set_location(rbp->as_VMReg()->next(), (address) link_addr); + } +#endif // AMD64 +} + //------------------------------------------------------------------------------ // frame::sender_for_interpreter_frame @@ -372,54 +420,13 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const { // This is the sp before any possible extension (adapter/locals). intptr_t* unextended_sp = interpreter_frame_sender_sp(); - // Stored FP. - intptr_t* saved_fp = link(); - - address sender_pc = this->sender_pc(); - CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc); - assert(sender_cb, "sanity"); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - - if (sender_nm != NULL) { - // If the sender PC is a deoptimization point, get the original - // PC. For MethodHandle call site the unextended_sp is stored in - // saved_fp. 
- if (sender_nm->is_deopt_mh_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp)); - unextended_sp = saved_fp; - } - else if (sender_nm->is_deopt_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp)); - } - else if (sender_nm->is_method_handle_return(sender_pc)) { - unextended_sp = saved_fp; - } - } - - // The interpreter and compiler(s) always save EBP/RBP in a known - // location on entry. We must record where that location is - // so this if EBP/RBP was live on callout from c2 we can find - // the saved copy no matter what it called. - - // Since the interpreter always saves EBP/RBP if we record where it is then - // we don't have to always save EBP/RBP on entry and exit to c2 compiled - // code, on entry will be enough. #ifdef COMPILER2 if (map->update_map()) { - map->set_location(rbp->as_VMReg(), (address) addr_at(link_offset)); -#ifdef AMD64 - // this is weird "H" ought to be at a higher address however the - // oopMaps seems to have the "H" regs at the same address and the - // vanilla register. - // XXXX make this go away - if (true) { - map->set_location(rbp->as_VMReg()->next(), (address)addr_at(link_offset)); - } -#endif // AMD64 + update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset)); } #endif // COMPILER2 - return frame(sender_sp, unextended_sp, saved_fp, sender_pc); + return frame(sender_sp, unextended_sp, link(), sender_pc()); } @@ -427,6 +434,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const { // frame::sender_for_compiled_frame frame frame::sender_for_compiled_frame(RegisterMap* map) const { assert(map != NULL, "map must be set"); + assert(!is_ricochet_frame(), "caller must handle this"); // frame owned by optimizing compiler assert(_cb->frame_size() >= 0, "must have non-zero frame size"); @@ -438,31 +446,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const { // This is the saved value of EBP which may or may not really be an FP. // It is only an FP if the sender is an interpreter frame (or C1?). - intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset); - - // If we are returning to a compiled MethodHandle call site, the - // saved_fp will in fact be a saved value of the unextended SP. The - // simplest way to tell whether we are returning to such a call site - // is as follows: - CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc); - assert(sender_cb, "sanity"); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - - if (sender_nm != NULL) { - // If the sender PC is a deoptimization point, get the original - // PC. For MethodHandle call site the unextended_sp is stored in - // saved_fp. - if (sender_nm->is_deopt_mh_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp)); - unextended_sp = saved_fp; - } - else if (sender_nm->is_deopt_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp)); - } - else if (sender_nm->is_method_handle_return(sender_pc)) { - unextended_sp = saved_fp; - } - } + intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); if (map->update_map()) { // Tell GC to use argument oopmaps for some runtime stubs that need it. 
@@ -472,23 +456,15 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const { if (_cb->oop_maps() != NULL) { OopMapSet::update_register_map(this, map); } + // Since the prolog does the save and restore of EBP there is no oopmap // for it so we must fill in its location as if there was an oopmap entry // since if our caller was compiled code there could be live jvm state in it. - map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset)); -#ifdef AMD64 - // this is weird "H" ought to be at a higher address however the - // oopMaps seems to have the "H" regs at the same address and the - // vanilla register. - // XXXX make this go away - if (true) { - map->set_location(rbp->as_VMReg()->next(), (address) (sender_sp - frame::sender_sp_offset)); - } -#endif // AMD64 + update_map_with_saved_link(map, saved_fp_addr); } assert(sender_sp != sp(), "must have changed"); - return frame(sender_sp, unextended_sp, saved_fp, sender_pc); + return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc); } @@ -502,6 +478,7 @@ frame frame::sender(RegisterMap* map) const { if (is_entry_frame()) return sender_for_entry_frame(map); if (is_interpreted_frame()) return sender_for_interpreter_frame(map); assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); + if (is_ricochet_frame()) return sender_for_ricochet_frame(map); if (_cb != NULL) { return sender_for_compiled_frame(map); @@ -673,7 +650,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const { #ifdef ASSERT #define DESCRIBE_FP_OFFSET(name) \ - values.describe(-1, fp() + frame::name##_offset, #name) + values.describe(frame_no, fp() + frame::name##_offset, #name) void frame::describe_pd(FrameValues& values, int frame_no) { if (is_interpreted_frame()) { diff --git a/hotspot/src/cpu/x86/vm/frame_x86.hpp b/hotspot/src/cpu/x86/vm/frame_x86.hpp index ea92dc518cc..a307a340428 100644 --- a/hotspot/src/cpu/x86/vm/frame_x86.hpp +++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp @@ -164,6 +164,7 @@ // original sp we use that convention. 
intptr_t* _unextended_sp; + void adjust_unextended_sp(); intptr_t* ptr_at_addr(int offset) const { return (intptr_t*) addr_at(offset); @@ -197,6 +198,9 @@ // expression stack tos if we are nested in a java call intptr_t* interpreter_frame_last_sp() const; + // helper to update a map with callee-saved RBP + static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr); + #ifndef CC_INTERP // deoptimization support void interpreter_frame_set_last_sp(intptr_t* sp); diff --git a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp index bb9ac15cb31..d093facdc15 100644 --- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp +++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp @@ -62,6 +62,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address _pc = pc; assert(pc != NULL, "no pc?"); _cb = CodeCache::find_blob(pc); + adjust_unextended_sp(); address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != NULL) { diff --git a/hotspot/src/cpu/x86/vm/interpreter_x86.hpp b/hotspot/src/cpu/x86/vm/interpreter_x86.hpp index f3ff867b893..8a6169c0ca4 100644 --- a/hotspot/src/cpu/x86/vm/interpreter_x86.hpp +++ b/hotspot/src/cpu/x86/vm/interpreter_x86.hpp @@ -26,7 +26,9 @@ #define CPU_X86_VM_INTERPRETER_X86_HPP public: - static Address::ScaleFactor stackElementScale() { return Address::times_4; } + static Address::ScaleFactor stackElementScale() { + return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8); + } // Offset from rsp (which points to the last stack element) static int expr_offset_in_bytes(int i) { return stackElementSize * i; } diff --git a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp index cb2345a4183..43a5a18a5b6 100644 --- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp @@ -242,26 +242,6 @@ address InterpreterGenerator::generate_method_handle_entry(void) { return entry_point; } - -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*) NULL, - (frame*) NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in diff --git a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp index 3dbea5754dc..1c124c2f0b7 100644 --- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp @@ -362,20 +362,6 @@ address InterpreterGenerator::generate_empty_entry(void) { } -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, popframe_extra_args, moncount, - callee_param_count, callee_locals, - (frame*) NULL, (frame*) NULL, is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in diff 
--git a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp index 42a57c1322d..1427d13c2c4 100644 --- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp +++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp @@ -69,23 +69,475 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas return me; } +// stack walking support + +frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { + RicochetFrame* f = RicochetFrame::from_frame(fr); + if (map->update_map()) + frame::update_map_with_saved_link(map, &f->_sender_link); + return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc()); +} + +void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { + RicochetFrame* f = RicochetFrame::from_frame(fr); + + // pick up the argument type descriptor: + Thread* thread = Thread::current(); + Handle cookie(thread, f->compute_saved_args_layout(true, true)); + + // process fixed part + blk->do_oop((oop*)f->saved_target_addr()); + blk->do_oop((oop*)f->saved_args_layout_addr()); + + // process variable arguments: + if (cookie.is_null()) return; // no arguments to describe + + // the cookie is actually the invokeExact method for my target + // his argument signature is what I'm interested in + assert(cookie->is_method(), ""); + methodHandle invoker(thread, methodOop(cookie())); + assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); + assert(!invoker->is_static(), "must have MH argument"); + int slot_count = invoker->size_of_parameters(); + assert(slot_count >= 1, "must include 'this'"); + intptr_t* base = f->saved_args_base(); + intptr_t* retval = NULL; + if (f->has_return_value_slot()) + retval = f->return_value_slot_addr(); + int slot_num = slot_count; + intptr_t* loc = &base[slot_num -= 1]; + //blk->do_oop((oop*) loc); // original target, which is irrelevant + int arg_num = 0; + for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { + if (ss.at_return_type()) continue; + BasicType ptype = ss.type(); + if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT + assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); + loc = &base[slot_num -= type2size[ptype]]; + bool is_oop = (ptype == T_OBJECT && loc != retval); + if (is_oop) blk->do_oop((oop*)loc); + arg_num += 1; + } + assert(slot_num == 0, "must have processed all the arguments"); +} + +oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { + oop cookie = NULL; + if (read_cache) { + cookie = saved_args_layout(); + if (cookie != NULL) return cookie; + } + oop target = saved_target(); + oop mtype = java_lang_invoke_MethodHandle::type(target); + oop mtform = java_lang_invoke_MethodType::form(mtype); + cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); + if (write_cache) { + (*saved_args_layout_addr()) = cookie; + } + return cookie; +} + +void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, + // output params: + int* frame_size_in_words, + int* bounce_offset, + int* exception_offset) { + (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; + + address start = __ pc(); + #ifdef ASSERT -static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, - const char* error_message) { + __ hlt(); __ hlt(); __ hlt(); + // here's a hint of something special: + __ push(MAGIC_NUMBER_1); + __ push(MAGIC_NUMBER_2); +#endif 
//ASSERT + __ hlt(); // not reached + + // A return PC has just been popped from the stack. + // Return values are in registers. + // The ebp points into the RicochetFrame, which contains + // a cleanup continuation we must return to. + + (*bounce_offset) = __ pc() - start; + BLOCK_COMMENT("ricochet_blob.bounce"); + + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + trace_method_handle(_masm, "return/ricochet_blob.bounce"); + + __ jmp(frame_address(continuation_offset_in_bytes())); + __ hlt(); + DEBUG_ONLY(__ push(MAGIC_NUMBER_2)); + + (*exception_offset) = __ pc() - start; + BLOCK_COMMENT("ricochet_blob.exception"); + + // compare this to Interpreter::rethrow_exception_entry, which is parallel code + // for example, see TemplateInterpreterGenerator::generate_throw_exception + // Live registers in: + // rax: exception + // rdx: return address/pc that threw exception (ignored, always equal to bounce addr) + __ verify_oop(rax); + + // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed + + // Take down the frame. + + // Cf. InterpreterMacroAssembler::remove_activation. + leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg, + saved_last_sp_register(), + /*sender_pc_reg=*/ rdx); + + // In between activations - previous activation type unknown yet + // compute continuation point - the continuation point expects the + // following registers set up: + // + // rax: exception + // rdx: return address/pc that threw exception + // rsp: expression stack of caller + // rbp: ebp of caller + __ push(rax); // save exception + __ push(rdx); // save return address + Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi); + NOT_LP64(__ get_thread(thread_reg)); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, + SharedRuntime::exception_handler_for_return_address), + thread_reg, rdx); + __ mov(rbx, rax); // save exception handler + __ pop(rdx); // restore return address + __ pop(rax); // restore exception + __ jmp(rbx); // jump to exception + // handler of caller +} + +void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register rax_argv, + address return_handler, + Register rbx_temp) { + const Register saved_last_sp = saved_last_sp_register(); + Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); + Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); + + // Push the RicochetFrame a word at a time. + // This creates something similar to an interpreter frame. + // Cf. TemplateInterpreterGenerator::generate_fixed_frame. 
+ BLOCK_COMMENT("push RicochetFrame {"); + DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame)); + assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), ""); +#define RF_FIELD(push_value, name) \ + { push_value; \ + assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); } + RF_FIELD(__ push(rbp), sender_link); + RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13 + RF_FIELD(__ pushptr(rcx_amh_conversion), conversion); + RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted + RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie + if (UseCompressedOops) { + __ load_heap_oop(rbx_temp, rcx_mh_vmtarget); + RF_FIELD(__ push(rbx_temp), saved_target); + } else { + RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target); + } + __ lea(rbx_temp, ExternalAddress(return_handler)); + RF_FIELD(__ push(rbx_temp), continuation); +#undef RF_FIELD + assert(rfo == 0, "fully initialized the RicochetFrame"); + // compute new frame pointer: + __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes())); + // Push guard word #1 in debug mode. + DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1)); + // For debugging, leave behind an indication of which stub built this frame. + DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); }); + BLOCK_COMMENT("} RicochetFrame"); +} + +void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register new_sp_reg, + Register sender_pc_reg) { + assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg); + const Register saved_last_sp = saved_last_sp_register(); + // Take down the frame. + // Cf. InterpreterMacroAssembler::remove_activation. + BLOCK_COMMENT("end_ricochet_frame {"); + // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down. + // This will keep stack in bounds even with unlimited tailcalls, each with an adapter. + if (rcx_recv->is_valid()) + __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes())); + __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes())); + __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes())); + __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes())); + __ mov(rsp, new_sp_reg); + BLOCK_COMMENT("} end_ricochet_frame"); +} + +// Emit code to verify that RBP is pointing at a valid ricochet frame. +#ifdef ASSERT +enum { + ARG_LIMIT = 255, SLOP = 4, + // use this parameter for checking for garbage stack movements: + UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) + // the slop defends against false alarms due to fencepost errors +}; + +void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { + // The stack should look like this: + // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args | + // Check various invariants. 
+ verify_offsets(); + + Register rdi_temp = rdi; + Register rcx_temp = rcx; + { __ push(rdi_temp); __ push(rcx_temp); } +#define UNPUSH_TEMPS \ + { __ pop(rcx_temp); __ pop(rdi_temp); } + + Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes()); + Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes()); + Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes()); + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + + Label L_bad, L_ok; + BLOCK_COMMENT("verify_clean {"); + // Magic numbers must check out: + __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1); + __ jcc(Assembler::notEqual, L_bad); + __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2); + __ jcc(Assembler::notEqual, L_bad); + + // Arguments pointer must look reasonable: + __ movptr(rcx_temp, saved_args_base_addr); + __ cmpptr(rcx_temp, rbp); + __ jcc(Assembler::below, L_bad); + __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize); + __ cmpptr(rcx_temp, rbp); + __ jcc(Assembler::above, L_bad); + + load_conversion_dest_type(_masm, rdi_temp, conversion_addr); + __ cmpl(rdi_temp, T_VOID); + __ jcc(Assembler::equal, L_ok); + __ movptr(rcx_temp, saved_args_base_addr); + load_conversion_vminfo(_masm, rdi_temp, conversion_addr); + __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()), + (int32_t) RETURN_VALUE_PLACEHOLDER); + __ jcc(Assembler::equal, L_ok); + __ BIND(L_bad); + UNPUSH_TEMPS; + __ stop("damaged ricochet frame"); + __ BIND(L_ok); + UNPUSH_TEMPS; + BLOCK_COMMENT("} verify_clean"); + +#undef UNPUSH_TEMPS + +} +#endif //ASSERT + +void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { + if (VerifyMethodHandles) + verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), + "AMH argument is a Class"); + __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); +} + +void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { + int bits = BitsPerByte; + int offset = (CONV_VMINFO_SHIFT / bits); + int shift = (CONV_VMINFO_SHIFT % bits); + __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); + assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load"); + assert(shift == 0, "no shift needed"); +} + +void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { + int bits = BitsPerByte; + int offset = (CONV_DEST_TYPE_SHIFT / bits); + int shift = (CONV_DEST_TYPE_SHIFT % bits); + __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); + assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load"); + __ shrl(reg, shift); + DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1)); + assert((shift + conv_type_bits) == bits, "left justified in byte"); +} + +void MethodHandles::load_stack_move(MacroAssembler* _masm, + Register rdi_stack_move, + Register rcx_amh, + bool might_be_negative) { + BLOCK_COMMENT("load_stack_move"); + Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); + __ movl(rdi_stack_move, rcx_amh_conversion); + __ 
sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); +#ifdef _LP64 + if (might_be_negative) { + // clean high bits of stack motion register (was loaded as an int) + __ movslq(rdi_stack_move, rdi_stack_move); + } +#endif //_LP64 + if (VerifyMethodHandles) { + Label L_ok, L_bad; + int32_t stack_move_limit = 0x4000; // extra-large + __ cmpptr(rdi_stack_move, stack_move_limit); + __ jcc(Assembler::greaterEqual, L_bad); + __ cmpptr(rdi_stack_move, -stack_move_limit); + __ jcc(Assembler::greater, L_ok); + __ bind(L_bad); + __ stop("load_stack_move of garbage value"); + __ BIND(L_ok); + } +} + +#ifndef PRODUCT +void MethodHandles::RicochetFrame::verify_offsets() { + // Check compatibility of this struct with the more generally used offsets of class frame: + int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value + assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), ""); + // These last two have to be exact: + assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), ""); +} + +void MethodHandles::RicochetFrame::verify() const { + verify_offsets(); + assert(magic_number_1() == MAGIC_NUMBER_1, ""); + assert(magic_number_2() == MAGIC_NUMBER_2, ""); + if (!Universe::heap()->is_gc_active()) { + if (saved_args_layout() != NULL) { + assert(saved_args_layout()->is_method(), "must be valid oop"); + } + if (saved_target() != NULL) { + assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); + } + } + int conv_op = adapter_conversion_op(conversion()); + assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || + conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || + conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, + "must be a sane conversion"); + if (has_return_value_slot()) { + assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); + } +} +#endif //PRODUCT + +#ifdef ASSERT +void MethodHandles::verify_argslot(MacroAssembler* _masm, + Register argslot_reg, + const char* error_message) { // Verify that argslot lies within (rsp, rbp]. Label L_ok, L_bad; - BLOCK_COMMENT("{ verify_argslot"); + BLOCK_COMMENT("verify_argslot {"); __ cmpptr(argslot_reg, rbp); __ jccb(Assembler::above, L_bad); __ cmpptr(rsp, argslot_reg); __ jccb(Assembler::below, L_ok); __ bind(L_bad); __ stop(error_message); - __ bind(L_ok); + __ BIND(L_ok); BLOCK_COMMENT("} verify_argslot"); } -#endif +void MethodHandles::verify_argslots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register arg_slot_base_reg, + bool negate_argslots, + const char* error_message) { + // Verify that [argslot..argslot+size) lies within (rsp, rbp). 
+ Label L_ok, L_bad; + Register rdi_temp = rdi; + BLOCK_COMMENT("verify_argslots {"); + __ push(rdi_temp); + if (negate_argslots) { + if (arg_slots.is_constant()) { + arg_slots = -1 * arg_slots.as_constant(); + } else { + __ movptr(rdi_temp, arg_slots); + __ negptr(rdi_temp); + arg_slots = rdi_temp; + } + } + __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale())); + __ cmpptr(rdi_temp, rbp); + __ pop(rdi_temp); + __ jcc(Assembler::above, L_bad); + __ cmpptr(rsp, arg_slot_base_reg); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop(error_message); + __ BIND(L_ok); + BLOCK_COMMENT("} verify_argslots"); +} + +// Make sure that arg_slots has the same sign as the given direction. +// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. +void MethodHandles::verify_stack_move(MacroAssembler* _masm, + RegisterOrConstant arg_slots, int direction) { + bool allow_zero = arg_slots.is_constant(); + if (direction == 0) { direction = +1; allow_zero = true; } + assert(stack_move_unit() == -1, "else add extra checks here"); + if (arg_slots.is_register()) { + Label L_ok, L_bad; + BLOCK_COMMENT("verify_stack_move {"); + // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need + // jcc(Assembler::notZero, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); + if (direction > 0) { + __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); + __ jcc(Assembler::less, L_ok); + } else { + __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); + __ jcc(Assembler::greater, L_ok); + } + __ bind(L_bad); + if (direction > 0) + __ stop("assert arg_slots > 0"); + else + __ stop("assert arg_slots < 0"); + __ BIND(L_ok); + BLOCK_COMMENT("} verify_stack_move"); + } else { + intptr_t size = arg_slots.as_constant(); + if (direction < 0) size = -size; + assert(size >= 0, "correct direction of constant move"); + assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); + } +} + +void MethodHandles::verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, + const char* error_message) { + oop* klass_addr = klass.raw_value(); + assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() && + klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), + "must be one of the SystemDictionaryHandles"); + Register temp = rdi; + Label L_ok, L_bad; + BLOCK_COMMENT("verify_klass {"); + __ verify_oop(obj); + __ testptr(obj, obj); + __ jcc(Assembler::zero, L_bad); + __ push(temp); + __ load_klass(temp, obj); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); + intptr_t super_check_offset = klass->super_check_offset(); + __ movptr(temp, Address(temp, super_check_offset)); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); + __ pop(temp); + __ bind(L_bad); + __ stop(error_message); + __ BIND(L_ok); + __ pop(temp); + BLOCK_COMMENT("} verify_klass"); +} +#endif //ASSERT // Code generation address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { @@ -116,6 +568,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* address entry_point = __ pc(); // fetch the MethodType from the method handle into rax (the 'check' register) + // FIXME: Interpreter should transmit pre-popped stack 
pointer, to locate base of arg list. + // This would simplify several touchy bits of code. + // See 6984712: JSR 292 method handle calls need a clean argument base pointer { Register tem = rbx_method; for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { @@ -128,17 +583,23 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_vmslots = rdx_temp; __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); - __ movptr(rcx_recv, __ argument_address(rdx_vmslots)); + Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots); + __ movptr(rcx_recv, mh_receiver_slot_addr); trace_method_handle(_masm, "invokeExact"); __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); + + // Nobody uses the MH receiver slot after this. Make sure. + DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999)); + __ jump_to_method_handle_entry(rcx_recv, rdi_temp); // for invokeGeneric (only), apply argument and result conversions on the fly __ bind(invoke_generic_slow_path); #ifdef ASSERT - { Label L; + if (VerifyMethodHandles) { + Label L; __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); __ jcc(Assembler::equal, L); __ stop("bad methodOop::intrinsic_id"); @@ -150,22 +611,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* // make room on the stack for another pointer: Register rcx_argslot = rcx_recv; __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); - insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, + insert_arg_slots(_masm, 2 * stack_move_unit(), rcx_argslot, rbx_temp, rdx_temp); // load up an adapter from the calling type (Java weaves this) - __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_adapter = rdx_temp; - // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes())); - // deal with old JDK versions: - __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); - __ cmpptr(rdi_temp, rdx_temp); - Label sorry_no_invoke_generic; - __ jcc(Assembler::below, sorry_no_invoke_generic); - - __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0)); - __ testptr(rdx_adapter, rdx_adapter); - __ jcc(Assembler::zero, sorry_no_invoke_generic); + __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); + __ load_heap_oop(rdx_adapter, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); + __ verify_oop(rdx_adapter); __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); // As a trusted first argument, pass the type being called, so the adapter knows // the actual types of the arguments and return values. @@ -176,49 +629,31 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* trace_method_handle(_masm, "invokeGeneric"); __ jump_to_method_handle_entry(rcx, rdi_temp); - __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available! 
- __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize)); // recover original MH - __ push(rax_mtype); // required mtype - __ push(rcx_recv); // bad mh (1st stacked argument) - __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); - return entry_point; } +// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. +static RegisterOrConstant constant(int value) { + return RegisterOrConstant(value); +} + // Helper to insert argument slots into the stack. -// arg_slots must be a multiple of stack_move_unit() and <= 0 +// arg_slots must be a multiple of stack_move_unit() and < 0 +// rax_argslot is decremented to point to the new (shifted) location of the argslot +// But, rdx_temp ends up holding the original value of rax_argslot. void MethodHandles::insert_arg_slots(MacroAssembler* _masm, RegisterOrConstant arg_slots, - int arg_mask, Register rax_argslot, - Register rbx_temp, Register rdx_temp, Register temp3_reg) { - assert(temp3_reg == noreg, "temp3 not required"); + Register rbx_temp, Register rdx_temp) { + // allow constant zero + if (arg_slots.is_constant() && arg_slots.as_constant() == 0) + return; assert_different_registers(rax_argslot, rbx_temp, rdx_temp, (!arg_slots.is_register() ? rsp : arg_slots.as_register())); - -#ifdef ASSERT - verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); - if (arg_slots.is_register()) { - Label L_ok, L_bad; - __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); - __ jccb(Assembler::greater, L_bad); - __ testl(arg_slots.as_register(), -stack_move_unit() - 1); - __ jccb(Assembler::zero, L_ok); - __ bind(L_bad); - __ stop("assert arg_slots <= 0 and clear low bits"); - __ bind(L_ok); - } else { - assert(arg_slots.as_constant() <= 0, ""); - assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); - } -#endif //ASSERT - -#ifdef _LP64 - if (arg_slots.is_register()) { - // clean high bits of stack motion register (was loaded as an int) - __ movslq(arg_slots.as_register(), arg_slots.as_register()); - } -#endif + if (VerifyMethodHandles) + verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); + if (VerifyMethodHandles) + verify_stack_move(_masm, arg_slots, -1); // Make space on the stack for the inserted argument(s). // Then pull down everything shallower than rax_argslot. @@ -230,59 +665,39 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm, // argslot -= size; BLOCK_COMMENT("insert_arg_slots {"); __ mov(rdx_temp, rsp); // source pointer for copy - __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); { Label loop; __ BIND(loop); // pull one word down each time through the loop __ movptr(rbx_temp, Address(rdx_temp, 0)); - __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); __ addptr(rdx_temp, wordSize); __ cmpptr(rdx_temp, rax_argslot); - __ jccb(Assembler::less, loop); + __ jcc(Assembler::below, loop); } // Now move the argslot down, to point to the opened-up space. - __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); + __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); BLOCK_COMMENT("} insert_arg_slots"); } // Helper to remove argument slots from the stack. 
-// arg_slots must be a multiple of stack_move_unit() and >= 0 +// arg_slots must be a multiple of stack_move_unit() and > 0 void MethodHandles::remove_arg_slots(MacroAssembler* _masm, - RegisterOrConstant arg_slots, - Register rax_argslot, - Register rbx_temp, Register rdx_temp, Register temp3_reg) { - assert(temp3_reg == noreg, "temp3 not required"); + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp) { + // allow constant zero + if (arg_slots.is_constant() && arg_slots.as_constant() == 0) + return; assert_different_registers(rax_argslot, rbx_temp, rdx_temp, (!arg_slots.is_register() ? rsp : arg_slots.as_register())); - -#ifdef ASSERT - // Verify that [argslot..argslot+size) lies within (rsp, rbp). - __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr)); - verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame"); - if (arg_slots.is_register()) { - Label L_ok, L_bad; - __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); - __ jccb(Assembler::less, L_bad); - __ testl(arg_slots.as_register(), -stack_move_unit() - 1); - __ jccb(Assembler::zero, L_ok); - __ bind(L_bad); - __ stop("assert arg_slots >= 0 and clear low bits"); - __ bind(L_ok); - } else { - assert(arg_slots.as_constant() >= 0, ""); - assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); - } -#endif //ASSERT - -#ifdef _LP64 - if (false) { // not needed, since register is positive - // clean high bits of stack motion register (was loaded as an int) - if (arg_slots.is_register()) - __ movslq(arg_slots.as_register(), arg_slots.as_register()); - } -#endif + if (VerifyMethodHandles) + verify_argslots(_masm, arg_slots, rax_argslot, false, + "deleted argument(s) must fall within current frame"); + if (VerifyMethodHandles) + verify_stack_move(_masm, arg_slots, +1); BLOCK_COMMENT("remove_arg_slots {"); // Pull up everything shallower than rax_argslot. @@ -299,54 +714,344 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm, __ BIND(loop); // pull one word up each time through the loop __ movptr(rbx_temp, Address(rdx_temp, 0)); - __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); __ addptr(rdx_temp, -wordSize); __ cmpptr(rdx_temp, rsp); - __ jccb(Assembler::greaterEqual, loop); + __ jcc(Assembler::aboveEqual, loop); } // Now move the argslot up, to point to the just-copied block. - __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); // And adjust the argslot address to point at the deletion point. - __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); + __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); BLOCK_COMMENT("} remove_arg_slots"); } +// Helper to copy argument slots to the top of the stack. +// The sequence starts with rax_argslot and is counted by slot_count +// slot_count must be a multiple of stack_move_unit() and >= 0 +// This function blows the temps but does not change rax_argslot. +void MethodHandles::push_arg_slots(MacroAssembler* _masm, + Register rax_argslot, + RegisterOrConstant slot_count, + int skip_words_count, + Register rbx_temp, Register rdx_temp) { + assert_different_registers(rax_argslot, rbx_temp, rdx_temp, + (!slot_count.is_register() ? 
rbp : slot_count.as_register()), + rsp); + assert(Interpreter::stackElementSize == wordSize, "else change this code"); + + if (VerifyMethodHandles) + verify_stack_move(_masm, slot_count, 0); + + // allow constant zero + if (slot_count.is_constant() && slot_count.as_constant() == 0) + return; + + BLOCK_COMMENT("push_arg_slots {"); + + Register rbx_top = rbx_temp; + + // There is at most 1 word to carry down with the TOS. + switch (skip_words_count) { + case 1: __ pop(rdx_temp); break; + case 0: break; + default: ShouldNotReachHere(); + } + + if (slot_count.is_constant()) { + for (int i = slot_count.as_constant() - 1; i >= 0; i--) { + __ pushptr(Address(rax_argslot, i * wordSize)); + } + } else { + Label L_plural, L_loop, L_break; + // Emit code to dynamically check for the common cases, zero and one slot. + __ cmpl(slot_count.as_register(), (int32_t) 1); + __ jccb(Assembler::greater, L_plural); + __ jccb(Assembler::less, L_break); + __ pushptr(Address(rax_argslot, 0)); + __ jmpb(L_break); + __ BIND(L_plural); + + // Loop for 2 or more: + // rbx = &rax[slot_count] + // while (rbx > rax) *(--rsp) = *(--rbx) + __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr)); + __ BIND(L_loop); + __ subptr(rbx_top, wordSize); + __ pushptr(Address(rbx_top, 0)); + __ cmpptr(rbx_top, rax_argslot); + __ jcc(Assembler::above, L_loop); + __ bind(L_break); + } + switch (skip_words_count) { + case 1: __ push(rdx_temp); break; + case 0: break; + default: ShouldNotReachHere(); + } + BLOCK_COMMENT("} push_arg_slots"); +} + +// in-place movement; no change to rsp +// blows rax_temp, rdx_temp +void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, + Register rbx_bottom, // invariant + Address top_addr, // can use rax_temp + RegisterOrConstant positive_distance_in_slots, + Register rax_temp, Register rdx_temp) { + BLOCK_COMMENT("move_arg_slots_up {"); + assert_different_registers(rbx_bottom, + rax_temp, rdx_temp, + positive_distance_in_slots.register_or_noreg()); + Label L_loop, L_break; + Register rax_top = rax_temp; + if (!top_addr.is_same_address(Address(rax_top, 0))) + __ lea(rax_top, top_addr); + // Detect empty (or broken) loop: +#ifdef ASSERT + if (VerifyMethodHandles) { + // Verify that &bottom < &top (non-empty interval) + Label L_ok, L_bad; + if (positive_distance_in_slots.is_register()) { + __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0); + __ jcc(Assembler::lessEqual, L_bad); + } + __ cmpptr(rbx_bottom, rax_top); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop("valid bounds (copy up)"); + __ BIND(L_ok); + } +#endif + __ cmpptr(rbx_bottom, rax_top); + __ jccb(Assembler::aboveEqual, L_break); + // work rax down to rbx, copying contiguous data upwards + // In pseudo-code: + // [rbx, rax) = &[bottom, top) + // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--; + __ BIND(L_loop); + __ subptr(rax_top, wordSize); + __ movptr(rdx_temp, Address(rax_top, 0)); + __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp); + __ cmpptr(rax_top, rbx_bottom); + __ jcc(Assembler::above, L_loop); + assert(Interpreter::stackElementSize == wordSize, "else change loop"); + __ bind(L_break); + BLOCK_COMMENT("} move_arg_slots_up"); +} + +// in-place movement; no change to rsp +// blows rax_temp, rdx_temp +void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, + Address bottom_addr, // can use rax_temp + Register rbx_top, // invariant + RegisterOrConstant negative_distance_in_slots, + Register rax_temp, Register rdx_temp) { + 
BLOCK_COMMENT("move_arg_slots_down {"); + assert_different_registers(rbx_top, + negative_distance_in_slots.register_or_noreg(), + rax_temp, rdx_temp); + Label L_loop, L_break; + Register rax_bottom = rax_temp; + if (!bottom_addr.is_same_address(Address(rax_bottom, 0))) + __ lea(rax_bottom, bottom_addr); + // Detect empty (or broken) loop: +#ifdef ASSERT + assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); + if (VerifyMethodHandles) { + // Verify that &bottom < &top (non-empty interval) + Label L_ok, L_bad; + if (negative_distance_in_slots.is_register()) { + __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0); + __ jcc(Assembler::greaterEqual, L_bad); + } + __ cmpptr(rax_bottom, rbx_top); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop("valid bounds (copy down)"); + __ BIND(L_ok); + } +#endif + __ cmpptr(rax_bottom, rbx_top); + __ jccb(Assembler::aboveEqual, L_break); + // work rax up to rbx, copying contiguous data downwards + // In pseudo-code: + // [rax, rbx) = &[bottom, top) + // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++; + __ BIND(L_loop); + __ movptr(rdx_temp, Address(rax_bottom, 0)); + __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp); + __ addptr(rax_bottom, wordSize); + __ cmpptr(rax_bottom, rbx_top); + __ jcc(Assembler::below, L_loop); + assert(Interpreter::stackElementSize == wordSize, "else change loop"); + __ bind(L_break); + BLOCK_COMMENT("} move_arg_slots_down"); +} + +// Copy from a field or array element to a stacked argument slot. +// is_element (ignored) says whether caller is loading an array element instead of an instance field. +void MethodHandles::move_typed_arg(MacroAssembler* _masm, + BasicType type, bool is_element, + Address slot_dest, Address value_src, + Register rbx_temp, Register rdx_temp) { + BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); + if (type == T_OBJECT || type == T_ARRAY) { + __ load_heap_oop(rbx_temp, value_src); + __ movptr(slot_dest, rbx_temp); + } else if (type != T_VOID) { + int arg_size = type2aelembytes(type); + bool arg_is_signed = is_signed_subword_type(type); + int slot_size = (arg_size > wordSize) ? arg_size : wordSize; + __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp); + __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp); + } + BLOCK_COMMENT("} move_typed_arg"); +} + +void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, + Address return_slot) { + BLOCK_COMMENT("move_return_value {"); + // Old versions of the JVM must clean the FPU stack after every return. +#ifndef _LP64 +#ifdef COMPILER2 + // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases + if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) { + for (int i = 1; i < 8; i++) { + __ ffree(i); + } + } else if (UseSSE < 2) { + __ empty_FPU_stack(); + } +#endif //COMPILER2 +#endif //!_LP64 + + // Look at the type and pull the value out of the corresponding register. + if (type == T_VOID) { + // nothing to do + } else if (type == T_OBJECT) { + __ movptr(return_slot, rax); + } else if (type == T_INT || is_subword_type(type)) { + // write the whole word, even if only 32 bits is significant + __ movptr(return_slot, rax); + } else if (type == T_LONG) { + // store the value by parts + // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
+ __ store_sized_value(return_slot, rax, BytesPerLong, rdx); + } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) || + (type == T_DOUBLE && UseSSE < 2) ||) + false) { + // Use old x86 FPU registers: + if (type == T_FLOAT) + __ fstp_s(return_slot); + else + __ fstp_d(return_slot); + } else if (type == T_FLOAT) { + __ movflt(return_slot, xmm0); + } else if (type == T_DOUBLE) { + __ movdbl(return_slot, xmm0); + } else { + ShouldNotReachHere(); + } + BLOCK_COMMENT("} move_return_value"); +} + + #ifndef PRODUCT extern "C" void print_method_handle(oop mh); void trace_method_handle_stub(const char* adaptername, - intptr_t* saved_sp, oop mh, - intptr_t* sp) { + intptr_t* saved_regs, + intptr_t* entry_sp, + intptr_t* saved_sp, + intptr_t* saved_bp) { // called as a leaf from native code: do not block the JVM! - intptr_t* entry_sp = sp + LP64_ONLY(16) NOT_LP64(8); - tty->print_cr("MH %s mh="INTPTR_FORMAT" sp="INTPTR_FORMAT" saved_sp="INTPTR_FORMAT")", - adaptername, (intptr_t)mh, (intptr_t)entry_sp, saved_sp); + bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh + intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset]; + intptr_t* base_sp = last_sp; + typedef MethodHandles::RicochetFrame RicochetFrame; + RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes()); + if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) { + // Probably an interpreter frame. + base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset]; + } + intptr_t mh_reg = (intptr_t)mh; + const char* mh_reg_name = "rcx_mh"; + if (!has_mh) mh_reg_name = "rcx"; + tty->print_cr("MH %s %s="PTR_FORMAT" sp=("PTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="PTR_FORMAT, + adaptername, mh_reg_name, mh_reg, + (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp); if (Verbose) { - print_method_handle(mh); + tty->print(" reg dump: "); + int saved_regs_count = (entry_sp-1) - saved_regs; + // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax + int i; + for (i = 0; i <= saved_regs_count; i++) { + if (i > 0 && i % 4 == 0 && i != saved_regs_count) { + tty->cr(); + tty->print(" + dump: "); + } + tty->print(" %d: "PTR_FORMAT, i, saved_regs[i]); + } + tty->cr(); + if (last_sp != saved_sp && last_sp != NULL) + tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp); + int stack_dump_count = 16; + if (stack_dump_count < (int)(saved_bp + 2 - saved_sp)) + stack_dump_count = (int)(saved_bp + 2 - saved_sp); + if (stack_dump_count > 64) stack_dump_count = 48; + for (i = 0; i < stack_dump_count; i += 4) { + tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT, + i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]); + } + if (has_mh) + print_method_handle(mh); } } + +// The stub wraps the arguments in a struct on the stack to avoid +// dealing with the different calling conventions for passing 6 +// arguments. 
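The comment above explains why the tracing stub bundles its six parameters into a single stack record instead of fighting the platform calling conventions. As a stand-alone illustration of that wrapper-struct pattern, here is a minimal C++ sketch under hypothetical names (TraceArgs, consume_args); it is a model of the idea, not the generated code:

    #include <cstdio>
    #include <cstdint>

    // Bundle many parameters into one stack-allocated record and hand the
    // callee a single pointer, so only a one-argument convention is needed.
    struct TraceArgs {
      const char* label;
      void*       handle;
      intptr_t*   regs;
      intptr_t*   entry_sp;
      intptr_t*   saved_sp;
      intptr_t*   saved_bp;
    };

    // The "leaf" routine sees one argument regardless of how many fields exist.
    static void consume_args(const TraceArgs* a) {
      std::printf("%s handle=%p entry_sp=%p\n", a->label, a->handle, (void*)a->entry_sp);
    }

    int main() {
      intptr_t dummy[4] = {0, 1, 2, 3};
      TraceArgs a = {"demo", nullptr, dummy, dummy + 4, dummy + 4, dummy + 2};
      consume_args(&a);   // single-pointer call; no six-argument convention involved
      return 0;
    }

The generated code that follows does the same thing with pushes: it materializes MethodHandleStubArguments on the stack and passes rsp as the lone argument to trace_method_handle_stub_wrapper.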
+struct MethodHandleStubArguments { + const char* adaptername; + oopDesc* mh; + intptr_t* saved_regs; + intptr_t* entry_sp; + intptr_t* saved_sp; + intptr_t* saved_bp; +}; +void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { + trace_method_handle_stub(args->adaptername, + args->mh, + args->saved_regs, + args->entry_sp, + args->saved_sp, + args->saved_bp); +} + void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { if (!TraceMethodHandles) return; BLOCK_COMMENT("trace_method_handle {"); + __ push(rax); + __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp __ pusha(); __ pusha(); -#ifdef _LP64 - // Pass arguments carefully since the registers overlap with the calling convention. + __ mov(rbx, rsp); + __ enter(); + // incoming state: // rcx: method handle - // r13: saved sp - __ mov(c_rarg2, rcx); // mh - __ mov(c_rarg1, r13); // saved sp - __ mov(c_rarg3, rsp); // sp - __ movptr(c_rarg0, (intptr_t) adaptername); - __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), c_rarg0, c_rarg1, c_rarg2, c_rarg3); -#else - // arguments: - // rcx: method handle - // rsi: saved sp - __ movptr(rbx, (intptr_t) adaptername); - __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), rbx, rsi, rcx, rsp); -#endif + // r13 or rsi: saved sp + // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead. + __ push(rbp); // saved_bp + __ push(rsi); // saved_sp + __ push(rax); // entry_sp + __ push(rbx); // pusha saved_regs + __ push(rcx); // mh + __ push(rcx); // adaptername + __ movptr(Address(rsp, 0), (intptr_t) adaptername); + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp); + __ leave(); __ popa(); + __ pop(rax); BLOCK_COMMENT("} trace_method_handle"); } #endif //PRODUCT @@ -358,13 +1063,20 @@ int MethodHandles::adapter_conversion_ops_supported_mask() { |(1<= _bound_ref_direct_mh); - BasicType arg_type = T_ILLEGAL; - int arg_mask = _INSERT_NO_MASK; - int arg_slots = -1; - get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots); + BasicType arg_type = ek_bound_mh_arg_type(ek); + int arg_slots = type2size[arg_type]; // make room for the new argument: __ movl(rax_argslot, rcx_bmh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp); + insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); // store bound argument into the new stack slot: __ load_heap_oop(rbx_temp, rcx_bmh_argument); @@ -589,9 +1308,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan __ movptr(Address(rax_argslot, 0), rbx_temp); } else { Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); - const int arg_size = type2aelembytes(arg_type); - __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp); - __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp); + move_typed_arg(_masm, arg_type, false, + Address(rax_argslot, 0), + prim_value_addr, + rbx_temp, rdx_temp); } if (direct_to_method) { @@ -628,7 +1348,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // What class are we casting to? __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 
- __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + load_klass_from_Class(_masm, rbx_klass); Label done; __ movptr(rdx_temp, vmarg); @@ -663,6 +1383,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan case _adapter_prim_to_prim: case _adapter_ref_to_prim: + case _adapter_prim_to_ref: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; @@ -714,8 +1435,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // Do the requested conversion and store the value. Register rbx_vminfo = rbx_temp; - __ movl(rbx_vminfo, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); + load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); // get the new MH: __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); @@ -753,7 +1473,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // on a little-endian machine we keep the first slot and add another after __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); - insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + insert_arg_slots(_masm, stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); Address vmarg1(rax_argslot, -Interpreter::stackElementSize); Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); @@ -805,7 +1525,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); if (ek == _adapter_opt_f2d) { - insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + insert_arg_slots(_masm, stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); } Address vmarg(rax_argslot, -Interpreter::stackElementSize); @@ -823,7 +1543,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan #else //_LP64 if (ek == _adapter_opt_f2d) { __ fld_s(vmarg); // load float to ST0 - __ fstp_s(vmarg); // store single + __ fstp_d(vmarg); // store double } else { __ fld_d(vmarg); // load double to ST0 __ fstp_s(vmarg); // store single @@ -840,10 +1560,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan } break; - case _adapter_prim_to_ref: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - case _adapter_swap_args: case _adapter_rot_args: // handled completely by optimized cases @@ -857,8 +1573,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan case _adapter_opt_rot_2_up: case _adapter_opt_rot_2_down: { - int swap_bytes = 0, rotate = 0; - get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate); + int swap_slots = ek_adapter_opt_swap_slots(ek); + int rotate = ek_adapter_opt_swap_mode(ek); // 'argslot' is the position of the first argument to swap __ movl(rax_argslot, rcx_amh_vmargslot); @@ -866,83 +1582,69 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // 'vminfo' is the second Register rbx_destslot = rbx_temp; - __ movl(rbx_destslot, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); - __ andl(rbx_destslot, CONV_VMINFO_MASK); + load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion); __ lea(rbx_destslot, __ argument_address(rbx_destslot)); - DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame")); + if (VerifyMethodHandles) + verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"); + 
assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); if (!rotate) { - for (int i = 0; i < swap_bytes; i += wordSize) { - __ movptr(rdx_temp, Address(rax_argslot , i)); - __ push(rdx_temp); - __ movptr(rdx_temp, Address(rbx_destslot, i)); - __ movptr(Address(rax_argslot, i), rdx_temp); - __ pop(rdx_temp); - __ movptr(Address(rbx_destslot, i), rdx_temp); + // simple swap + for (int i = 0; i < swap_slots; i++) { + __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); + __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize)); + __ movptr(Address(rax_argslot, i * wordSize), rdx_temp); + __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); } } else { - // push the first chunk, which is going to get overwritten - for (int i = swap_bytes; (i -= wordSize) >= 0; ) { - __ movptr(rdx_temp, Address(rax_argslot, i)); - __ push(rdx_temp); + // A rotate is actually pair of moves, with an "odd slot" (or pair) + // changing place with a series of other slots. + // First, push the "odd slot", which is going to get overwritten + for (int i = swap_slots - 1; i >= 0; i--) { + // handle one with rdi_temp instead of a push: + if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); + else __ pushptr( Address(rax_argslot, i * wordSize)); } - if (rotate > 0) { - // rotate upward - __ subptr(rax_argslot, swap_bytes); -#ifdef ASSERT - { - // Verify that argslot > destslot, by at least swap_bytes. - Label L_ok; - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::aboveEqual, L_ok); - __ stop("source must be above destination (upward rotation)"); - __ bind(L_ok); - } -#endif + // Here is rotate > 0: + // (low mem) (high mem) + // | dest: more_slots... | arg: odd_slot :arg+1 | + // => + // | dest: odd_slot | dest+1: more_slots... :arg+1 | // work argslot down to destslot, copying contiguous data upwards // pseudo-code: // rax = src_addr - swap_bytes // rbx = dest_addr // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; - Label loop; - __ bind(loop); - __ movptr(rdx_temp, Address(rax_argslot, 0)); - __ movptr(Address(rax_argslot, swap_bytes), rdx_temp); - __ addptr(rax_argslot, -wordSize); - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::aboveEqual, loop); + move_arg_slots_up(_masm, + rbx_destslot, + Address(rax_argslot, 0), + swap_slots, + rax_argslot, rdx_temp); } else { - __ addptr(rax_argslot, swap_bytes); -#ifdef ASSERT - { - // Verify that argslot < destslot, by at least swap_bytes. - Label L_ok; - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::belowEqual, L_ok); - __ stop("source must be below destination (downward rotation)"); - __ bind(L_ok); - } -#endif + // Here is the other direction, rotate < 0: + // (low mem) (high mem) + // | arg: odd_slot | arg+1: more_slots... :dest+1 | + // => + // | arg: more_slots... 
| dest: odd_slot :dest+1 | // work argslot up to destslot, copying contiguous data downwards // pseudo-code: // rax = src_addr + swap_bytes // rbx = dest_addr // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; - Label loop; - __ bind(loop); - __ movptr(rdx_temp, Address(rax_argslot, 0)); - __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp); - __ addptr(rax_argslot, wordSize); - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::belowEqual, loop); - } + __ addptr(rbx_destslot, wordSize); + move_arg_slots_down(_masm, + Address(rax_argslot, swap_slots * wordSize), + rbx_destslot, + -swap_slots, + rax_argslot, rdx_temp); + __ subptr(rbx_destslot, wordSize); + } // pop the original first chunk into the destination slot, now free - for (int i = 0; i < swap_bytes; i += wordSize) { - __ pop(rdx_temp); - __ movptr(Address(rbx_destslot, i), rdx_temp); + for (int i = 0; i < swap_slots; i++) { + if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); + else __ popptr(Address(rbx_destslot, i * wordSize)); } } @@ -958,53 +1660,22 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan __ lea(rax_argslot, __ argument_address(rax_argslot)); // 'stack_move' is negative number of words to duplicate - Register rdx_stack_move = rdx_temp; - __ movl2ptr(rdx_stack_move, rcx_amh_conversion); - __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT); + Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, true); - int argslot0_num = 0; - Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); - assert(argslot0.base() == rsp, ""); - int pre_arg_size = argslot0.disp(); - assert(pre_arg_size % wordSize == 0, ""); - assert(pre_arg_size > 0, "must include PC"); - - // remember the old rsp+1 (argslot[0]) - Register rbx_oldarg = rbx_temp; - __ lea(rbx_oldarg, argslot0); - - // move rsp down to make room for dups - __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr)); - - // compute the new rsp+1 (argslot[0]) - Register rdx_newarg = rdx_temp; - __ lea(rdx_newarg, argslot0); - - __ push(rdi); // need a temp - // (preceding push must be done after arg addresses are taken!) - - // pull down the pre_arg_size data (PC) - for (int i = -pre_arg_size; i < 0; i += wordSize) { - __ movptr(rdi, Address(rbx_oldarg, i)); - __ movptr(Address(rdx_newarg, i), rdi); + if (VerifyMethodHandles) { + verify_argslots(_masm, rdi_stack_move, rax_argslot, true, + "copied argument(s) must fall within current frame"); } - // copy from rax_argslot[0...] down to new_rsp[1...] 
- // pseudo-code: - // rbx = old_rsp+1 - // rdx = new_rsp+1 - // rax = argslot - // while (rdx < rbx) *rdx++ = *rax++ - Label loop; - __ bind(loop); - __ movptr(rdi, Address(rax_argslot, 0)); - __ movptr(Address(rdx_newarg, 0), rdi); - __ addptr(rax_argslot, wordSize); - __ addptr(rdx_newarg, wordSize); - __ cmpptr(rdx_newarg, rbx_oldarg); - __ jccb(Assembler::less, loop); + // insert location is always the bottom of the argument list: + Address insert_location = __ argument_address(constant(0)); + int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed + assert(insert_location.base() == rsp, ""); - __ pop(rdi); // restore temp + __ negl(rdi_stack_move); + push_arg_slots(_masm, rax_argslot, rdi_stack_move, + pre_arg_words, rbx_temp, rdx_temp); __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); @@ -1017,63 +1688,583 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - __ push(rdi); // need a temp // (must do previous push after argslot address is taken) // 'stack_move' is number of words to drop - Register rdi_stack_move = rdi; - __ movl2ptr(rdi_stack_move, rcx_amh_conversion); - __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, false); remove_arg_slots(_masm, rdi_stack_move, rax_argslot, rbx_temp, rdx_temp); - __ pop(rdi); // restore temp - __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_collect_args: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - + case _adapter_fold_args: case _adapter_spread_args: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; + case _adapter_opt_collect_ref: + case _adapter_opt_collect_int: + case _adapter_opt_collect_long: + case _adapter_opt_collect_float: + case _adapter_opt_collect_double: + case _adapter_opt_collect_void: + case _adapter_opt_collect_0_ref: + case _adapter_opt_collect_1_ref: + case _adapter_opt_collect_2_ref: + case _adapter_opt_collect_3_ref: + case _adapter_opt_collect_4_ref: + case _adapter_opt_collect_5_ref: + case _adapter_opt_filter_S0_ref: + case _adapter_opt_filter_S1_ref: + case _adapter_opt_filter_S2_ref: + case _adapter_opt_filter_S3_ref: + case _adapter_opt_filter_S4_ref: + case _adapter_opt_filter_S5_ref: + case _adapter_opt_collect_2_S0_ref: + case _adapter_opt_collect_2_S1_ref: + case _adapter_opt_collect_2_S2_ref: + case _adapter_opt_collect_2_S3_ref: + case _adapter_opt_collect_2_S4_ref: + case _adapter_opt_collect_2_S5_ref: + case _adapter_opt_fold_ref: + case _adapter_opt_fold_int: + case _adapter_opt_fold_long: + case _adapter_opt_fold_float: + case _adapter_opt_fold_double: + case _adapter_opt_fold_void: + case _adapter_opt_fold_1_ref: + case _adapter_opt_fold_2_ref: + case _adapter_opt_fold_3_ref: + case _adapter_opt_fold_4_ref: + case _adapter_opt_fold_5_ref: + { + // Given a fresh incoming stack frame, build a new ricochet frame. + // On entry, TOS points at a return PC, and RBP is the callers frame ptr. + // RSI/R13 has the caller's exact stack pointer, which we must also preserve. + // RCX contains an AdapterMethodHandle of the indicated kind. + + // Relevant AMH fields: + // amh.vmargslot: + // points to the trailing edge of the arguments + // to filter, collect, or fold. 
For a boxing operation, + // it points just after the single primitive value. + // amh.argument: + // recursively called MH, on |collect| arguments + // amh.vmtarget: + // final destination MH, on return value, etc. + // amh.conversion.dest: + // tells what is the type of the return value + // (not needed here, since dest is also derived from ek) + // amh.conversion.vminfo: + // points to the trailing edge of the return value + // when the vmtarget is to be called; this is + // equal to vmargslot + (retained ? |collect| : 0) + + // Pass 0 or more argument slots to the recursive target. + int collect_count_constant = ek_adapter_opt_collect_count(ek); + + // The collected arguments are copied from the saved argument list: + int collect_slot_constant = ek_adapter_opt_collect_slot(ek); + + assert(ek_orig == _adapter_collect_args || + ek_orig == _adapter_fold_args, ""); + bool retain_original_args = (ek_orig == _adapter_fold_args); + + // The return value is replaced (or inserted) at the 'vminfo' argslot. + // Sometimes we can compute this statically. + int dest_slot_constant = -1; + if (!retain_original_args) + dest_slot_constant = collect_slot_constant; + else if (collect_slot_constant >= 0 && collect_count_constant >= 0) + // We are preserving all the arguments, and the return value is prepended, + // so the return slot is to the left (above) the |collect| sequence. + dest_slot_constant = collect_slot_constant + collect_count_constant; + + // Replace all those slots by the result of the recursive call. + // The result type can be one of ref, int, long, float, double, void. + // In the case of void, nothing is pushed on the stack after return. + BasicType dest = ek_adapter_opt_collect_type(ek); + assert(dest == type2wfield[dest], "dest is a stack slot type"); + int dest_count = type2size[dest]; + assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); + + // Choose a return continuation. + EntryKind ek_ret = _adapter_opt_return_any; + if (dest != T_CONFLICT && OptimizeMethodHandles) { + switch (dest) { + case T_INT : ek_ret = _adapter_opt_return_int; break; + case T_LONG : ek_ret = _adapter_opt_return_long; break; + case T_FLOAT : ek_ret = _adapter_opt_return_float; break; + case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; + case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; + case T_VOID : ek_ret = _adapter_opt_return_void; break; + default : ShouldNotReachHere(); + } + if (dest == T_OBJECT && dest_slot_constant >= 0) { + EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); + if (ek_try <= _adapter_opt_return_LAST && + ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { + ek_ret = ek_try; + } + } + assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); + } + + // Already pushed: ... keep1 | collect | keep2 | sender_pc | + // push(sender_pc); + + // Compute argument base: + Register rax_argv = rax_argslot; + __ lea(rax_argv, __ argument_address(constant(0))); + + // Push a few extra argument words, if we need them to store the return value. 
+ { + int extra_slots = 0; + if (retain_original_args) { + extra_slots = dest_count; + } else if (collect_count_constant == -1) { + extra_slots = dest_count; // collect_count might be zero; be generous + } else if (dest_count > collect_count_constant) { + extra_slots = (dest_count - collect_count_constant); + } else { + // else we know we have enough dead space in |collect| to repurpose for return values + } + DEBUG_ONLY(extra_slots += 1); + if (extra_slots > 0) { + __ pop(rbx_temp); // return value + __ subptr(rsp, (extra_slots * Interpreter::stackElementSize)); + // Push guard word #2 in debug mode. + DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2)); + __ push(rbx_temp); + } + } + + RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv, + entry(ek_ret)->from_interpreted_entry(), rbx_temp); + + // Now pushed: ... keep1 | collect | keep2 | RF | + // some handy frame slots: + Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()); + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + +#ifdef ASSERT + if (VerifyMethodHandles && dest != T_CONFLICT) { + BLOCK_COMMENT("verify AMH.conv.dest"); + load_conversion_dest_type(_masm, rbx_temp, conversion_addr); + Label L_dest_ok; + __ cmpl(rbx_temp, (int) dest); + __ jcc(Assembler::equal, L_dest_ok); + if (dest == T_INT) { + for (int bt = T_BOOLEAN; bt < T_INT; bt++) { + if (is_subword_type(BasicType(bt))) { + __ cmpl(rbx_temp, (int) bt); + __ jcc(Assembler::equal, L_dest_ok); + } + } + } + __ stop("bad dest in AMH.conv"); + __ BIND(L_dest_ok); + } +#endif //ASSERT + + // Find out where the original copy of the recursive argument sequence begins. + Register rax_coll = rax_argv; + { + RegisterOrConstant collect_slot = collect_slot_constant; + if (collect_slot_constant == -1) { + __ movl(rdi_temp, rcx_amh_vmargslot); + collect_slot = rdi_temp; + } + if (collect_slot_constant != 0) + __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale())); + // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2| + } + + // Replace the old AMH with the recursive MH. (No going back now.) + // In the case of a boxing call, the recursive call is to a 'boxer' method, + // such as Integer.valueOf or Long.valueOf. In the case of a filter + // or collect call, it will take one or more arguments, transform them, + // and return some result, to store back into argument_base[vminfo]. + __ load_heap_oop(rcx_recv, rcx_amh_argument); + if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); + + // Push a space for the recursively called MH first: + __ push((int32_t)NULL_WORD); + + // Calculate |collect|, the number of arguments we are collecting. 
+ Register rdi_collect_count = rdi_temp; + RegisterOrConstant collect_count; + if (collect_count_constant >= 0) { + collect_count = collect_count_constant; + } else { + __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp); + collect_count = rdi_collect_count; + } +#ifdef ASSERT + if (VerifyMethodHandles && collect_count_constant >= 0) { + __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp); + Label L_count_ok; + __ cmpl(rbx_temp, collect_count_constant); + __ jcc(Assembler::equal, L_count_ok); + __ stop("bad vminfo in AMH.conv"); + __ BIND(L_count_ok); + } +#endif //ASSERT + + // copy |collect| slots directly to TOS: + push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp); + // Now pushed: ... keep1 | collect | keep2 | RF... | collect | + // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2| + + // If necessary, adjust the saved arguments to make room for the eventual return value. + // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | + // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | + // In the non-retaining case, this might move keep2 either up or down. + // We don't have to copy the whole | RF... collect | complex, + // but we must adjust RF.saved_args_base. + // Also, from now on, we will forget about the origial copy of |collect|. + // If we are retaining it, we will treat it as part of |keep2|. + // For clarity we will define |keep3| = |collect|keep2| or |keep2|. + + BLOCK_COMMENT("adjust trailing arguments {"); + // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. + int open_count = dest_count; + RegisterOrConstant close_count = collect_count_constant; + Register rdi_close_count = rdi_collect_count; + if (retain_original_args) { + close_count = constant(0); + } else if (collect_count_constant == -1) { + close_count = rdi_collect_count; + } + + // How many slots need moving? This is simply dest_slot (0 => no |keep3|). + RegisterOrConstant keep3_count; + Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp + if (dest_slot_constant >= 0) { + keep3_count = dest_slot_constant; + } else { + load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr); + keep3_count = rsi_keep3_count; + } +#ifdef ASSERT + if (VerifyMethodHandles && dest_slot_constant >= 0) { + load_conversion_vminfo(_masm, rbx_temp, conversion_addr); + Label L_vminfo_ok; + __ cmpl(rbx_temp, dest_slot_constant); + __ jcc(Assembler::equal, L_vminfo_ok); + __ stop("bad vminfo in AMH.conv"); + __ BIND(L_vminfo_ok); + } +#endif //ASSERT + + // tasks remaining: + bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); + bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); + bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); + + if (stomp_dest | fix_arg_base) { + // we will probably need an updated rax_argv value + if (collect_slot_constant >= 0) { + // rax_coll already holds the leading edge of |keep2|, so tweak it + assert(rax_coll == rax_argv, "elided a move"); + if (collect_slot_constant != 0) + __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize); + } else { + // Just reload from RF.saved_args_base. + __ movptr(rax_argv, saved_args_base_addr); + } + } + + // Old and new argument locations (based at slot 0). + // Net shift (&new_argv - &old_argv) is (close_count - open_count). 
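Before the adjustment code, the arithmetic in the comment above can be seen in isolation. A minimal sketch, assuming only the stated relationship (net shift = close_count - open_count); the helper name net_shift_in_slots is hypothetical:

    #include <cstdio>

    // open_count  = slots opened for the incoming return value (dest_count)
    // close_count = collected argument slots that disappear (0 when the
    //               original arguments are retained, i.e. for fold)
    // A positive result means the argument base moves toward higher addresses
    // (the saved-argument area shrinks); a negative result means it grows.
    static int net_shift_in_slots(int open_count, int close_count) {
      return close_count - open_count;
    }

    int main() {
      // Collect three argument slots and replace them with a one-slot int result:
      std::printf("collect 3 -> int : %+d slots\n", net_shift_in_slots(1, 3));  // +2
      // Fold (arguments retained) with a two-slot long result prepended:
      std::printf("fold     -> long : %+d slots\n", net_shift_in_slots(2, 0));  // -2
      return 0;
    }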
+ bool zero_open_count = (open_count == 0); // remember this bit of info + if (move_keep3 && fix_arg_base) { + // It will be easier t have everything in one register: + if (close_count.is_register()) { + // Deduct open_count from close_count register to get a clean +/- value. + __ subptr(close_count.as_register(), open_count); + } else { + close_count = close_count.as_constant() - open_count; + } + open_count = 0; + } + Address old_argv(rax_argv, 0); + Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(), + - open_count * Interpreter::stackElementSize); + + // First decide if any actual data are to be moved. + // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. + // (As it happens, all movements involve an argument list size change.) + + // If there are variable parameters, use dynamic checks to skip around the whole mess. + Label L_done; + if (!keep3_count.is_constant()) { + __ testl(keep3_count.as_register(), keep3_count.as_register()); + __ jcc(Assembler::zero, L_done); + } + if (!close_count.is_constant()) { + __ cmpl(close_count.as_register(), open_count); + __ jcc(Assembler::equal, L_done); + } + + if (move_keep3 && fix_arg_base) { + bool emit_move_down = false, emit_move_up = false, emit_guard = false; + if (!close_count.is_constant()) { + emit_move_down = emit_guard = !zero_open_count; + emit_move_up = true; + } else if (open_count != close_count.as_constant()) { + emit_move_down = (open_count > close_count.as_constant()); + emit_move_up = !emit_move_down; + } + Label L_move_up; + if (emit_guard) { + __ cmpl(close_count.as_register(), open_count); + __ jcc(Assembler::greater, L_move_up); + } + + if (emit_move_down) { + // Move arguments down if |+dest+| > |-collect-| + // (This is rare, except when arguments are retained.) + // This opens space for the return value. + if (keep3_count.is_constant()) { + for (int i = 0; i < keep3_count.as_constant(); i++) { + __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); + __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); + } + } else { + Register rbx_argv_top = rbx_temp; + __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale())); + move_arg_slots_down(_masm, + old_argv, // beginning of old argv + rbx_argv_top, // end of old argv + close_count, // distance to move down (must be negative) + rax_argv, rdx_temp); + // Used argv as an iteration variable; reload from RF.saved_args_base. + __ movptr(rax_argv, saved_args_base_addr); + } + } + + if (emit_guard) { + __ jmp(L_done); // assumes emit_move_up is true also + __ BIND(L_move_up); + } + + if (emit_move_up) { + + // Move arguments up if |+dest+| < |-collect-| + // (This is usual, except when |keep3| is empty.) + // This closes up the space occupied by the now-deleted collect values. 
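The register-count path just below delegates to move_arg_slots_up, whose loop appears earlier in this patch only as pseudo-code. A stand-alone C++ model of that upward in-place copy, using ordinary memory instead of the interpreter stack and the hypothetical name move_slots_up:

    #include <cstdio>
    #include <cstdint>

    // Copy the slots in [bottom, top) upward (toward higher addresses) by
    // 'distance' slots, walking from the top down so no source slot is
    // overwritten before it has been read.
    static void move_slots_up(intptr_t* bottom, intptr_t* top, int distance) {
      for (intptr_t* p = top; p > bottom; ) {
        --p;
        p[distance] = p[0];
      }
    }

    int main() {
      intptr_t slots[6] = {10, 11, 12, 0, 0, 0};
      move_slots_up(&slots[0], &slots[3], 2);         // shift three slots up by two
      for (int i = 0; i < 6; i++) std::printf("%ld ", (long) slots[i]);
      std::printf("\n");                              // prints: 10 11 10 11 12 0
      return 0;
    }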
+ if (keep3_count.is_constant()) { + for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { + __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); + __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); + } + } else { + Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()); + move_arg_slots_up(_masm, + rax_argv, // beginning of old argv + argv_top, // end of old argv + close_count, // distance to move up (must be positive) + rbx_temp, rdx_temp); + } + } + } + __ BIND(L_done); + + if (fix_arg_base) { + // adjust RF.saved_args_base by adding (close_count - open_count) + if (!new_argv.is_same_address(Address(rax_argv, 0))) + __ lea(rax_argv, new_argv); + __ movptr(saved_args_base_addr, rax_argv); + } + + if (stomp_dest) { + // Stomp the return slot, so it doesn't hold garbage. + // This isn't strictly necessary, but it may help detect bugs. + int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER; + __ movptr(Address(rax_argv, keep3_count, Address::times_ptr), + (int32_t) forty_two); + // uses rsi_keep3_count + } + BLOCK_COMMENT("} adjust trailing arguments"); + + BLOCK_COMMENT("do_recursive_call"); + __ mov(saved_last_sp, rsp); // set rsi/r13 for callee + __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); + // The globally unique bounce address has two purposes: + // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). + // 2. When returned to, it cuts back the stack and redirects control flow + // to the return handler. + // The return handler will further cut back the stack when it takes + // down the RF. Perhaps there is a way to streamline this further. + + // State during recursive call: + // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + + break; + } + + case _adapter_opt_return_ref: + case _adapter_opt_return_int: + case _adapter_opt_return_long: + case _adapter_opt_return_float: + case _adapter_opt_return_double: + case _adapter_opt_return_void: + case _adapter_opt_return_S0_ref: + case _adapter_opt_return_S1_ref: + case _adapter_opt_return_S2_ref: + case _adapter_opt_return_S3_ref: + case _adapter_opt_return_S4_ref: + case _adapter_opt_return_S5_ref: + { + BasicType dest_type_constant = ek_adapter_opt_return_type(ek); + int dest_slot_constant = ek_adapter_opt_return_slot(ek); + + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + + if (dest_slot_constant == -1) { + // The current stub is a general handler for this dest_type. + // It can be called from _adapter_opt_return_any below. + // Stash the address in a little table. 
+ assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); + address return_handler = __ pc(); + _adapter_return_handlers[dest_type_constant] = return_handler; + if (dest_type_constant == T_INT) { + // do the subword types too + for (int bt = T_BOOLEAN; bt < T_INT; bt++) { + if (is_subword_type(BasicType(bt)) && + _adapter_return_handlers[bt] == NULL) { + _adapter_return_handlers[bt] = return_handler; + } + } + } + } + + Register rbx_arg_base = rbx_temp; + assert_different_registers(rax, rdx, // possibly live return value registers + rdi_temp, rbx_arg_base); + + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + + __ movptr(rbx_arg_base, saved_args_base_addr); + RegisterOrConstant dest_slot = dest_slot_constant; + if (dest_slot_constant == -1) { + load_conversion_vminfo(_masm, rdi_temp, conversion_addr); + dest_slot = rdi_temp; + } + // Store the result back into the argslot. + // This code uses the interpreter calling sequence, in which the return value + // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. + // There are certain irregularities with floating point values, which can be seen + // in TemplateInterpreterGenerator::generate_return_entry_for. + move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale())); + + RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp); + __ push(rdx_temp); // repush the return PC + + // Load the final target and go. + if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + __ hlt(); // -------------------- + break; + } + + case _adapter_opt_return_any: + { + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + Register rdi_conv = rdi_temp; + assert_different_registers(rax, rdx, // possibly live return value registers + rdi_conv, rbx_temp); + + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + load_conversion_dest_type(_masm, rdi_conv, conversion_addr); + __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0])); + __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr)); + +#ifdef ASSERT + { Label L_badconv; + __ testptr(rbx_temp, rbx_temp); + __ jccb(Assembler::zero, L_badconv); + __ jmp(rbx_temp); + __ bind(L_badconv); + __ stop("bad method handle return"); + } +#else //ASSERT + __ jmp(rbx_temp); +#endif //ASSERT + break; + } + case _adapter_opt_spread_0: - case _adapter_opt_spread_1: - case _adapter_opt_spread_more: + case _adapter_opt_spread_1_ref: + case _adapter_opt_spread_2_ref: + case _adapter_opt_spread_3_ref: + case _adapter_opt_spread_4_ref: + case _adapter_opt_spread_5_ref: + case _adapter_opt_spread_ref: + case _adapter_opt_spread_byte: + case _adapter_opt_spread_char: + case _adapter_opt_spread_short: + case _adapter_opt_spread_int: + case _adapter_opt_spread_long: + case _adapter_opt_spread_float: + case _adapter_opt_spread_double: { // spread an array out into a group of arguments - int length_constant = get_ek_adapter_opt_spread_info(ek); + int length_constant = ek_adapter_opt_spread_count(ek); + bool length_can_be_zero = (length_constant == 0); + if (length_constant < 0) { + // some adapters with variable length must handle the zero case + if (!OptimizeMethodHandles || + 
ek_adapter_opt_spread_type(ek) != T_OBJECT) + length_can_be_zero = true; + } // find the address of the array argument __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - // grab some temps - { __ push(rsi); __ push(rdi); } - // (preceding pushes must be done after argslot address is taken!) -#define UNPUSH_RSI_RDI \ - { __ pop(rdi); __ pop(rsi); } + // grab another temp + Register rsi_temp = rsi; + { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); } + // (preceding push must be done after argslot address is taken!) +#define UNPUSH_RSI \ + { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); } // arx_argslot points both to the array and to the first output arg vmarg = Address(rax_argslot, 0); // Get the array value. - Register rsi_array = rsi; + Register rsi_array = rsi_temp; Register rdx_array_klass = rdx_temp; - BasicType elem_type = T_OBJECT; + BasicType elem_type = ek_adapter_opt_spread_type(ek); + int elem_slots = type2size[elem_type]; // 1 or 2 + int array_slots = 1; // array is always a T_OBJECT int length_offset = arrayOopDesc::length_offset_in_bytes(); int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); __ movptr(rsi_array, vmarg); - Label skip_array_check; - if (length_constant == 0) { + + Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; + if (length_can_be_zero) { + // handle the null pointer case, if zero is allowed + Label L_skip; + if (length_constant < 0) { + load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion); + __ testl(rbx_temp, rbx_temp); + __ jcc(Assembler::notZero, L_skip); + } __ testptr(rsi_array, rsi_array); - __ jcc(Assembler::zero, skip_array_check); + __ jcc(Assembler::zero, L_array_is_empty); + __ bind(L_skip); } __ null_check(rsi_array, oopDesc::klass_offset_in_bytes()); __ load_klass(rdx_array_klass, rsi_array); @@ -1081,22 +2272,20 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // Check the array type. Register rbx_klass = rbx_temp; __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! - __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + load_klass_from_Class(_masm, rbx_klass); Label ok_array_klass, bad_array_klass, bad_array_length; - __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass); + __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass); // If we get here, the type check failed! __ jmp(bad_array_klass); - __ bind(ok_array_klass); + __ BIND(ok_array_klass); // Check length. if (length_constant >= 0) { __ cmpl(Address(rsi_array, length_offset), length_constant); } else { Register rbx_vminfo = rbx_temp; - __ movl(rbx_vminfo, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); - __ andl(rbx_vminfo, CONV_VMINFO_MASK); + load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); __ cmpl(rbx_vminfo, Address(rsi_array, length_offset)); } __ jcc(Assembler::notEqual, bad_array_length); @@ -1108,90 +2297,104 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan // Form a pointer to the end of the affected region. __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); // 'stack_move' is negative number of words to insert - Register rdi_stack_move = rdi; - __ movl2ptr(rdi_stack_move, rcx_amh_conversion); - __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + // This number already accounts for elem_slots. 
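For the constant-length spread handled a little further below (new_slots = length * elem_slots - array_slots), the slot arithmetic can be checked in isolation. A minimal sketch with the hypothetical helper extra_slots_needed; the two-slot figures assume a 32-bit VM where long/double occupy two interpreter slots:

    #include <cstdio>

    // The array reference occupies array_slots (always 1); each spread element
    // needs elem_slots interpreter slots. A negative result means slots are
    // removed rather than inserted.
    static int extra_slots_needed(int length, int elem_slots, int array_slots = 1) {
      return length * elem_slots - array_slots;
    }

    int main() {
      std::printf("Object[3] : %+d\n", extra_slots_needed(3, 1));  // +2 slots inserted
      std::printf("long[3]   : %+d\n", extra_slots_needed(3, 2));  // +5 slots (32-bit)
      std::printf("Object[1] : %+d\n", extra_slots_needed(1, 1));  //  0, no resize
      std::printf("Object[0] : %+d\n", extra_slots_needed(0, 1));  // -1, the empty path just removes the array slot
      return 0;
    }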
+ Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, true); + __ cmpptr(rdi_stack_move, 0); + assert(stack_move_unit() < 0, "else change this comparison"); + __ jcc(Assembler::less, L_insert_arg_space); + __ jcc(Assembler::equal, L_copy_args); + // single argument case, with no array movement + __ BIND(L_array_is_empty); + remove_arg_slots(_masm, -stack_move_unit() * array_slots, + rax_argslot, rbx_temp, rdx_temp); + __ jmp(L_args_done); // no spreading to do + __ BIND(L_insert_arg_space); + // come here in the usual case, stack_move < 0 (2 or more spread arguments) Register rsi_temp = rsi_array; // spill this - insert_arg_slots(_masm, rdi_stack_move, -1, + insert_arg_slots(_masm, rdi_stack_move, rax_argslot, rbx_temp, rsi_temp); - // reload the array (since rsi was killed) - __ movptr(rsi_array, vmarg); - } else if (length_constant > 1) { - int arg_mask = 0; - int new_slots = (length_constant - 1); - for (int i = 0; i < new_slots; i++) { - arg_mask <<= 1; - arg_mask |= _INSERT_REF_MASK; - } - insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask, + // reload the array since rsi was killed + // reload from rdx_argslot_limit since rax_argslot is now decremented + __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize)); + } else if (length_constant >= 1) { + int new_slots = (length_constant * elem_slots) - array_slots; + insert_arg_slots(_masm, new_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); - } else if (length_constant == 1) { - // no stack resizing required } else if (length_constant == 0) { - remove_arg_slots(_masm, -stack_move_unit(), + __ BIND(L_array_is_empty); + remove_arg_slots(_masm, -stack_move_unit() * array_slots, rax_argslot, rbx_temp, rdx_temp); + } else { + ShouldNotReachHere(); } // Copy from the array to the new slots. // Note: Stack change code preserves integrity of rax_argslot pointer. // So even after slot insertions, rax_argslot still points to first argument. + // Beware: Arguments that are shallow on the stack are deep in the array, + // and vice versa. So a downward-growing stack (the usual) has to be copied + // elementwise in reverse order from the source array. + __ BIND(L_copy_args); if (length_constant == -1) { // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. + // Array element [0] goes at rdx_argslot_limit[-wordSize]. 
Register rsi_source = rsi_array; __ lea(rsi_source, Address(rsi_array, elem0_offset)); + Register rdx_fill_ptr = rdx_argslot_limit; Label loop; - __ bind(loop); - __ movptr(rbx_temp, Address(rsi_source, 0)); - __ movptr(Address(rax_argslot, 0), rbx_temp); + __ BIND(loop); + __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots); + move_typed_arg(_masm, elem_type, true, + Address(rdx_fill_ptr, 0), Address(rsi_source, 0), + rbx_temp, rdi_temp); __ addptr(rsi_source, type2aelembytes(elem_type)); - __ addptr(rax_argslot, Interpreter::stackElementSize); - __ cmpptr(rax_argslot, rdx_argslot_limit); - __ jccb(Assembler::less, loop); + __ cmpptr(rdx_fill_ptr, rax_argslot); + __ jcc(Assembler::above, loop); } else if (length_constant == 0) { - __ bind(skip_array_check); // nothing to copy } else { int elem_offset = elem0_offset; - int slot_offset = 0; + int slot_offset = length_constant * Interpreter::stackElementSize; for (int index = 0; index < length_constant; index++) { - __ movptr(rbx_temp, Address(rsi_array, elem_offset)); - __ movptr(Address(rax_argslot, slot_offset), rbx_temp); + slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward + move_typed_arg(_masm, elem_type, true, + Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset), + rbx_temp, rdi_temp); elem_offset += type2aelembytes(elem_type); - slot_offset += Interpreter::stackElementSize; } } + __ BIND(L_args_done); // Arguments are spread. Move to next method handle. - UNPUSH_RSI_RDI; + UNPUSH_RSI; __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); __ bind(bad_array_klass); - UNPUSH_RSI_RDI; + UNPUSH_RSI; assert(!vmarg.uses(rarg2_required), "must be different registers"); - __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type - __ movptr(rarg1_actual, vmarg); // bad array - __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? + __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type + __ movptr( rarg1_actual, vmarg); // bad array + __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); __ bind(bad_array_length); - UNPUSH_RSI_RDI; + UNPUSH_RSI; assert(!vmarg.uses(rarg2_required), "must be different registers"); - __ mov (rarg2_required, rcx_recv); // AMH requiring a certain length - __ movptr(rarg1_actual, vmarg); // bad array - __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? + __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length + __ movptr( rarg1_actual, vmarg); // bad array + __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +#undef UNPUSH_RSI -#undef UNPUSH_RSI_RDI + break; } - break; - case _adapter_flyby: - case _adapter_ricochet: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - - default: ShouldNotReachHere(); + default: + // do not require all platforms to recognize all adapter types + __ nop(); + return; } __ hlt(); diff --git a/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp new file mode 100644 index 00000000000..2a93500d7e5 --- /dev/null +++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Platform-specific definitions for method handles. +// These definitions are inlined into class MethodHandles. + +public: + +// The stack just after the recursive call from a ricochet frame +// looks something like this. Offsets are marked in words, not bytes. +// rsi (r13 on LP64) is part of the interpreter calling sequence +// which tells the callee where is my real rsp (for frame walking). +// (...lower memory addresses) +// rsp: [ return pc ] always the global RicochetBlob::bounce_addr +// rsp+1: [ recursive arg N ] +// rsp+2: [ recursive arg N-1 ] +// ... +// rsp+N: [ recursive arg 1 ] +// rsp+N+1: [ recursive method handle ] +// ... +// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame) +// rbp-5: [ saved target MH ] the MH we will call on the saved args +// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout +// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0) +// rbp-2: [ conversion ] information about how the return value is used +// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame +// rbp+0: [ saved sender fp ] (for original sender of AMH) +// rbp+1: [ saved sender pc ] (back to original sender of AMH) +// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender) +// rbp+3: [ transformed adapter arg M-1] +// ... +// rbp+M+1: [ transformed adapter arg 1 ] +// rbp+M+2: [ padding ] <-- (rbp + saved args base offset) +// ... [ optional padding] +// (higher memory addresses...) +// +// The arguments originally passed by the original sender +// are lost, and arbitrary amounts of stack motion might have +// happened due to argument transformation. +// (This is done by C2I/I2C adapters and non-direct method handles.) +// This is why there is an unpredictable amount of memory between +// the extended and exact TOS of the sender. +// The ricochet adapter itself will also (in general) perform +// transformations before the recursive call. +// +// The transformed and saved arguments, immediately above the saved +// return PC, are a well-formed method handle invocation ready to execute. +// When the GC needs to walk the stack, these arguments are described +// via the saved arg types oop, an int[] array with a private format. +// This array is derived from the type of the transformed adapter +// method handle, which also sits at the base of the saved argument +// bundle. Since the GC may not be able to fish out the int[] +// array, so it is pushed explicitly on the stack. This may be +// an unnecessary expense. 
+// +// The following register conventions are significant at this point: +// rsp the thread stack, as always; preserved by caller +// rsi/r13 exact TOS of recursive frame (contents of [rbp-2]) +// rcx recursive method handle (contents of [rsp+N+1]) +// rbp preserved by caller (not used by caller) +// Unless otherwise specified, all registers can be blown by the call. +// +// If this frame must be walked, the transformed adapter arguments +// will be found with the help of the saved arguments descriptor. +// +// Therefore, the descriptor must match the referenced arguments. +// The arguments must be followed by at least one word of padding, +// which will be necessary to complete the final method handle call. +// That word is not treated as holding an oop. +// +// Neither is the word pointed to by the return argument pointer +// treated as an oop, even if it points to a saved argument. +// This allows the saved argument list to have a "hole" in it +// to receive an oop from the recursive call. +// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.) +// +// When the recursive callee returns, RicochetBlob::bounce_addr will +// immediately jump to the continuation stored in the RF. +// This continuation will merge the recursive return value +// into the saved argument list. At that point, the original +// rsi, rbp, and rsp will be reloaded, the ricochet frame will +// disappear, and the final target of the adapter method handle +// will be invoked on the transformed argument list. + +class RicochetFrame { + friend class MethodHandles; + + private: + intptr_t* _continuation; // what to do when control gets back here + oopDesc* _saved_target; // target method handle to invoke on saved_args + oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie + intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3) + intptr_t _conversion; // misc.
information from original AdapterMethodHandle (-2) + intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1) + intptr_t* _sender_link; // *must* coincide with frame::link_offset (0) + address _sender_pc; // *must* coincide with frame::return_addr_offset (1) + + public: + intptr_t* continuation() const { return _continuation; } + oop saved_target() const { return _saved_target; } + oop saved_args_layout() const { return _saved_args_layout; } + intptr_t* saved_args_base() const { return _saved_args_base; } + intptr_t conversion() const { return _conversion; } + intptr_t* exact_sender_sp() const { return _exact_sender_sp; } + intptr_t* sender_link() const { return _sender_link; } + address sender_pc() const { return _sender_pc; } + + intptr_t* extended_sender_sp() const { return saved_args_base(); } + + intptr_t return_value_slot_number() const { + return adapter_conversion_vminfo(conversion()); + } + BasicType return_value_type() const { + return adapter_conversion_dest_type(conversion()); + } + bool has_return_value_slot() const { + return return_value_type() != T_VOID; + } + intptr_t* return_value_slot_addr() const { + assert(has_return_value_slot(), ""); + return saved_arg_slot_addr(return_value_slot_number()); + } + intptr_t* saved_target_slot_addr() const { + return saved_arg_slot_addr(saved_args_length()); + } + intptr_t* saved_arg_slot_addr(int slot) const { + assert(slot >= 0, ""); + return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); + } + + jint saved_args_length() const; + jint saved_arg_offset(int arg) const; + + // GC interface + oop* saved_target_addr() { return (oop*)&_saved_target; } + oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; } + + oop compute_saved_args_layout(bool read_cache, bool write_cache); + + // Compiler/assembler interface. + static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); } + static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); } + static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); } + static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); } + static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); } + static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); } + static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); } + static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); } + + // This value is not used for much, but it apparently must be nonzero. + static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); } + +#ifdef ASSERT + // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. + enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; + static int magic_number_1_offset_in_bytes() { return -wordSize; } + static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); } + intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); }; + intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); }; +#endif //ASSERT + + enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; + + static void verify_offsets() NOT_DEBUG_RETURN; + void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
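// --- Editorial sketch (not part of this changeset) --------------------------
// Illustrates the pointer arithmetic behind frame_address() and from_frame()
// declared nearby: the record is laid out so that _sender_link coincides with
// the saved frame pointer slot, so the whole record is recovered by
// subtracting offsetof(_sender_link) from fp.  FakeRicochetFrame mirrors the
// word layout documented above but is an invented stand-in, and it assumes
// (hypothetically) one machine word per interpreter stack slot.
#include <cstddef>
#include <cstdint>

struct FakeRicochetFrame {
  intptr_t* _continuation;       // rbp-6: cleanup continuation pc
  void*     _saved_target;       // rbp-5: target MH for the saved args
  void*     _saved_args_layout;  // rbp-4: int[] layout cookie
  intptr_t* _saved_args_base;    // rbp-3: slot 0 of the saved arguments
  intptr_t  _conversion;         // rbp-2: how the return value is used
  intptr_t* _exact_sender_sp;    // rbp-1: exact TOS of original sender
  intptr_t* _sender_link;        // rbp+0: must coincide with the saved fp
  void*     _sender_pc;          // rbp+1: return to original sender
};

// Recover the record from a frame pointer, as from_frame() does.
inline FakeRicochetFrame* frame_from_fp(void* fp) {
  return reinterpret_cast<FakeRicochetFrame*>(
      static_cast<char*>(fp) - offsetof(FakeRicochetFrame, _sender_link));
}

// Address of saved argument slot 'slot', as saved_arg_slot_addr() does.
inline intptr_t* saved_arg_slot_addr(const FakeRicochetFrame* rf, int slot) {
  return rf->_saved_args_base + slot;          // one word per slot assumed
}
// --- End editorial sketch ----------------------------------------------------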
+ void zap_arguments() NOT_DEBUG_RETURN; + + static void generate_ricochet_blob(MacroAssembler* _masm, + // output params: + int* frame_size_in_words, int* bounce_offset, int* exception_offset); + + static void enter_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register rax_argv, + address return_handler, + Register rbx_temp); + static void leave_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register new_sp_reg, + Register sender_pc_reg); + + static Address frame_address(int offset = 0) { + // The RicochetFrame is found by subtracting a constant offset from rbp. + return Address(rbp, - sender_link_offset_in_bytes() + offset); + } + + static RicochetFrame* from_frame(const frame& fr) { + address bp = (address) fr.fp(); + RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); + rf->verify(); + return rf; + } + + static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; +}; + +// Additional helper methods for MethodHandles code generation: +public: + static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); + static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr); + static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr); + + static void load_stack_move(MacroAssembler* _masm, + Register rdi_stack_move, + Register rcx_amh, + bool might_be_negative); + + static void insert_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); + + static void remove_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); + + static void push_arg_slots(MacroAssembler* _masm, + Register rax_argslot, + RegisterOrConstant slot_count, + int skip_words_count, + Register rbx_temp, Register rdx_temp); + + static void move_arg_slots_up(MacroAssembler* _masm, + Register rbx_bottom, // invariant + Address top_addr, // can use rax_temp + RegisterOrConstant positive_distance_in_slots, + Register rax_temp, Register rdx_temp); + + static void move_arg_slots_down(MacroAssembler* _masm, + Address bottom_addr, // can use rax_temp + Register rbx_top, // invariant + RegisterOrConstant negative_distance_in_slots, + Register rax_temp, Register rdx_temp); + + static void move_typed_arg(MacroAssembler* _masm, + BasicType type, bool is_element, + Address slot_dest, Address value_src, + Register rbx_temp, Register rdx_temp); + + static void move_return_value(MacroAssembler* _masm, BasicType type, + Address return_slot); + + static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, + const char* error_message) NOT_DEBUG_RETURN; + + static void verify_argslots(MacroAssembler* _masm, + RegisterOrConstant argslot_count, + Register argslot_reg, + bool negate_argslot, + const char* error_message) NOT_DEBUG_RETURN; + + static void verify_stack_move(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + int direction) NOT_DEBUG_RETURN; + + static void verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, + const char* error_message = "wrong klass") NOT_DEBUG_RETURN; + + static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) { + verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(), + "reference is a MH"); + } + + static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; + + static Register saved_last_sp_register() { + 
// Should be in sharedRuntime, not here. + return LP64_ONLY(r13) NOT_LP64(rsi); + } diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp index 149bc7c9ea4..bb32cf34dab 100644 --- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp @@ -2253,6 +2253,31 @@ uint SharedRuntime::out_preserve_stack_slots() { return 0; } +//----------------------------generate_ricochet_blob--------------------------- +void SharedRuntime::generate_ricochet_blob() { + if (!EnableInvokeDynamic) return; // leave it as a null + + // allocate space for the code + ResourceMark rm; + // setup code generation tools + CodeBuffer buffer("ricochet_blob", 256, 256); + MacroAssembler* masm = new MacroAssembler(&buffer); + + int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1; + MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset); + + // ------------- + // make sure all code is generated + masm->flush(); + + // failed to generate? + if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) { + assert(false, "bad ricochet blob"); + return; + } + + _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); +} //------------------------------generate_deopt_blob---------------------------- void SharedRuntime::generate_deopt_blob() { @@ -2996,6 +3021,8 @@ void SharedRuntime::generate_stubs() { generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); + generate_ricochet_blob(); + generate_deopt_blob(); #ifdef COMPILER2 generate_uncommon_trap_blob(); diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp index c017cf86d47..89766d1f3f7 100644 --- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp @@ -2530,6 +2530,32 @@ uint SharedRuntime::out_preserve_stack_slots() { } +//----------------------------generate_ricochet_blob--------------------------- +void SharedRuntime::generate_ricochet_blob() { + if (!EnableInvokeDynamic) return; // leave it as a null + + // allocate space for the code + ResourceMark rm; + // setup code generation tools + CodeBuffer buffer("ricochet_blob", 512, 512); + MacroAssembler* masm = new MacroAssembler(&buffer); + + int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1; + MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset); + + // ------------- + // make sure all code is generated + masm->flush(); + + // failed to generate? 
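// --- Editorial sketch (not part of this changeset) --------------------------
// The 32-bit generate_ricochet_blob() above and the 64-bit copy that
// continues below share the same control flow: emit code into a buffer,
// flush, verify that every required offset was produced, and only then wrap
// the buffer in a blob.  A condensed standalone restatement of that flow;
// Buffer, Blob and emit_ricochet_code are invented names.
#include <cstdio>
#include <memory>

struct Buffer { /* generated machine code would live here */ };
struct Blob   { int bounce_offset; int exception_offset; int frame_words; };

// Hypothetical generator: a real one emits machine code; this stub just
// reports plausible offsets so the sketch is self-contained.
void emit_ricochet_code(Buffer&, int* frame_words, int* bounce, int* exception) {
  *frame_words = 6; *bounce = 0; *exception = 16;
}

std::unique_ptr<Blob> make_ricochet_blob(bool enabled) {
  if (!enabled) return nullptr;                // mirrors the EnableInvokeDynamic guard

  Buffer buf;
  int frame_words = -1, bounce = -1, exception = -1;
  emit_ricochet_code(buf, &frame_words, &bounce, &exception);

  // Any offset still at -1 means generation failed; do not publish a blob.
  if (frame_words < 0 || bounce < 0 || exception < 0) return nullptr;

  return std::unique_ptr<Blob>(new Blob{bounce, exception, frame_words});
}

int main() {
  std::unique_ptr<Blob> blob = make_ricochet_blob(true);
  if (blob) std::printf("bounce at +%d, exception at +%d\n",
                        blob->bounce_offset, blob->exception_offset);
  return 0;
}
// --- End editorial sketch ----------------------------------------------------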
+ if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) { + assert(false, "bad ricochet blob"); + return; + } + + _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); +} + //------------------------------generate_deopt_blob---------------------------- void SharedRuntime::generate_deopt_blob() { // Allocate space for the code @@ -3205,6 +3231,8 @@ void SharedRuntime::generate_stubs() { generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); + generate_ricochet_blob(); + generate_deopt_blob(); #ifdef COMPILER2 diff --git a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp index 283274e3dde..65b131b3757 100644 --- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp +++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp @@ -36,7 +36,7 @@ enum platform_dependent_constants { // MethodHandles adapters enum method_handles_platform_dependent_constants { - method_handles_adapters_code_size = 10000 + method_handles_adapters_code_size = 30000 DEBUG_ONLY(+ 10000) }; class x86 { diff --git a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp index b1726caa692..ac6f2fcd853 100644 --- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp +++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp @@ -38,7 +38,7 @@ enum platform_dependent_constants { // MethodHandles adapters enum method_handles_platform_dependent_constants { - method_handles_adapters_code_size = 40000 + method_handles_adapters_code_size = 80000 DEBUG_ONLY(+ 120000) }; class x86 { diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp index a7222c265a5..6184bc393b5 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp @@ -1589,6 +1589,7 @@ int AbstractInterpreter::layout_activation(methodOop method, int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_locals, frame* caller, diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp index 8d3740c7049..80f08524b06 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp @@ -1603,6 +1603,7 @@ int AbstractInterpreter::layout_activation(methodOop method, int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_locals, frame* caller, diff --git a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp index 13a4710a104..961de9fdd96 100644 --- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp +++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp @@ -1427,6 +1427,7 @@ int AbstractInterpreter::layout_activation(methodOop method, int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_locals, frame* caller, diff --git a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp index ff5a69be88e..93c3b90fa45 100644 --- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp +++ b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp @@ -82,24 +82,6 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) { return true; } -int AbstractInterpreter::size_activation(methodOop method, - 
int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*) NULL, - (frame*) NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { } diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp index f00e426b881..791f62ecb7b 100644 --- a/hotspot/src/os/linux/vm/os_linux.cpp +++ b/hotspot/src/os/linux/vm/os_linux.cpp @@ -2850,7 +2850,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { char chars[257]; long x = 0; if (fgets(chars, sizeof(chars), fp)) { - if (sscanf(chars, "%lx-%*lx", &x) == 1 + if (sscanf(chars, "%lx-%*x", &x) == 1 && x == (long)p) { if (strstr (chars, "hugepage")) { result = true; diff --git a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp index e27ce064d94..1db5a4dd1b7 100644 --- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp +++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp @@ -132,17 +132,22 @@ void InstructionPrinter::print_object(Value obj) { if (value->is_null_object()) { output()->print("null"); } else if (!value->is_loaded()) { - output()->print("", value); + output()->print("", value); } else if (value->is_method()) { ciMethod* m = (ciMethod*)value; output()->print("", m->holder()->name()->as_utf8(), m->name()->as_utf8()); } else { - output()->print("", value->constant_encoding()); + output()->print("", value->constant_encoding()); } } else if (type->as_InstanceConstant() != NULL) { - output()->print("", type->as_InstanceConstant()->value()->constant_encoding()); + ciInstance* value = type->as_InstanceConstant()->value(); + if (value->is_loaded()) { + output()->print("", value->constant_encoding()); + } else { + output()->print("", value); + } } else if (type->as_ArrayConstant() != NULL) { - output()->print("", type->as_ArrayConstant()->value()->constant_encoding()); + output()->print("", type->as_ArrayConstant()->value()->constant_encoding()); } else if (type->as_ClassConstant() != NULL) { ciInstanceKlass* klass = type->as_ClassConstant()->value(); if (!klass->is_loaded()) { diff --git a/hotspot/src/share/vm/c1/c1_Optimizer.cpp b/hotspot/src/share/vm/c1/c1_Optimizer.cpp index f07494d379c..8d21edb7607 100644 --- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp +++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp @@ -252,26 +252,28 @@ Value CE_Eliminator::make_ifop(Value x, Instruction::Condition cond, Value y, Va Constant::CompareResult t_compare_res = x_tval_const->compare(cond, y_const); Constant::CompareResult f_compare_res = x_fval_const->compare(cond, y_const); - guarantee(t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable, "incomparable constants in IfOp"); + // not_comparable here is a valid return in case we're comparing unloaded oop constants + if (t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable) { + Value new_tval = t_compare_res == Constant::cond_true ? tval : fval; + Value new_fval = f_compare_res == Constant::cond_true ? tval : fval; - Value new_tval = t_compare_res == Constant::cond_true ? tval : fval; - Value new_fval = f_compare_res == Constant::cond_true ? 
tval : fval; - - _ifop_count++; - if (new_tval == new_fval) { - return new_tval; - } else { - return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval); + _ifop_count++; + if (new_tval == new_fval) { + return new_tval; + } else { + return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval); + } } } } else { Constant* x_const = x->as_Constant(); if (x_const != NULL) { // x and y are constants Constant::CompareResult x_compare_res = x_const->compare(cond, y_const); - guarantee(x_compare_res != Constant::not_comparable, "incomparable constants in IfOp"); - - _ifop_count++; - return x_compare_res == Constant::cond_true ? tval : fval; + // not_comparable here is a valid return in case we're comparing unloaded oop constants + if (x_compare_res != Constant::not_comparable) { + _ifop_count++; + return x_compare_res == Constant::cond_true ? tval : fval; + } } } } diff --git a/hotspot/src/share/vm/ci/ciMethodData.hpp b/hotspot/src/share/vm/ci/ciMethodData.hpp index d84b2c83f17..17fe9c9ed5f 100644 --- a/hotspot/src/share/vm/ci/ciMethodData.hpp +++ b/hotspot/src/share/vm/ci/ciMethodData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -233,7 +233,10 @@ private: public: bool is_method_data() { return true; } - bool is_empty() { return _state == empty_state; } + + void set_mature() { _state = mature_state; } + + bool is_empty() { return _state == empty_state; } bool is_mature() { return _state == mature_state; } int creation_mileage() { return _orig.creation_mileage(); } diff --git a/hotspot/src/share/vm/ci/ciMethodHandle.cpp b/hotspot/src/share/vm/ci/ciMethodHandle.cpp index ef57f800cab..c05b7eedf96 100644 --- a/hotspot/src/share/vm/ci/ciMethodHandle.cpp +++ b/hotspot/src/share/vm/ci/ciMethodHandle.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciClassList.hpp" #include "ci/ciInstance.hpp" +#include "ci/ciMethodData.hpp" #include "ci/ciMethodHandle.hpp" #include "ci/ciUtilities.hpp" #include "prims/methodHandleWalk.hpp" @@ -36,13 +37,13 @@ // ciMethodHandle::get_adapter // // Return an adapter for this MethodHandle. -ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { +ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const { VM_ENTRY_MARK; Handle h(get_oop()); methodHandle callee(_callee->get_methodOop()); // We catch all exceptions here that could happen in the method // handle compiler and stop the VM. - MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD); + MethodHandleCompiler mhc(h, callee, _profile->count(), is_invokedynamic, THREAD); if (!HAS_PENDING_EXCEPTION) { methodHandle m = mhc.compile(THREAD); if (!HAS_PENDING_EXCEPTION) { @@ -58,6 +59,22 @@ ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { return NULL; } +// ------------------------------------------------------------------ +// ciMethodHandle::get_adapter +// +// Return an adapter for this MethodHandle. +ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { + ciMethod* result = get_adapter_impl(is_invokedynamic); + if (result) { + // Fake up the MDO maturity. 
+ ciMethodData* mdo = result->method_data(); + if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) { + mdo->set_mature(); + } + } + return result; +} + // ------------------------------------------------------------------ // ciMethodHandle::print_impl diff --git a/hotspot/src/share/vm/ci/ciMethodHandle.hpp b/hotspot/src/share/vm/ci/ciMethodHandle.hpp index 69c1379946c..d9519d4a6e9 100644 --- a/hotspot/src/share/vm/ci/ciMethodHandle.hpp +++ b/hotspot/src/share/vm/ci/ciMethodHandle.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP #define SHARE_VM_CI_CIMETHODHANDLE_HPP +#include "ci/ciCallProfile.hpp" #include "ci/ciInstance.hpp" #include "prims/methodHandles.hpp" @@ -33,32 +34,37 @@ // The class represents a java.lang.invoke.MethodHandle object. class ciMethodHandle : public ciInstance { private: - ciMethod* _callee; + ciMethod* _callee; + ciMethod* _caller; + ciCallProfile* _profile; // Return an adapter for this MethodHandle. - ciMethod* get_adapter(bool is_invokedynamic) const; + ciMethod* get_adapter_impl(bool is_invokedynamic) const; + ciMethod* get_adapter( bool is_invokedynamic) const; protected: void print_impl(outputStream* st); public: - ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {}; + ciMethodHandle(instanceHandle h_i) : + ciInstance(h_i), + _callee(NULL), + _caller(NULL), + _profile(NULL) + {} // What kind of ciObject is this? bool is_method_handle() const { return true; } - ciMethod* callee() const { return _callee; } - void set_callee(ciMethod* m) { _callee = m; } + void set_callee(ciMethod* m) { _callee = m; } + void set_caller(ciMethod* m) { _caller = m; } + void set_call_profile(ciCallProfile* profile) { _profile = profile; } // Return an adapter for a MethodHandle call. - ciMethod* get_method_handle_adapter() const { - return get_adapter(false); - } + ciMethod* get_method_handle_adapter() const { return get_adapter(false); } // Return an adapter for an invokedynamic call. 
- ciMethod* get_invokedynamic_adapter() const { - return get_adapter(true); - } + ciMethod* get_invokedynamic_adapter() const { return get_adapter(true); } }; #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP diff --git a/hotspot/src/share/vm/classfile/javaClasses.cpp b/hotspot/src/share/vm/classfile/javaClasses.cpp index fa422673d48..eb512306f38 100644 --- a/hotspot/src/share/vm/classfile/javaClasses.cpp +++ b/hotspot/src/share/vm/classfile/javaClasses.cpp @@ -2602,6 +2602,7 @@ int java_lang_invoke_MethodType::ptype_count(oop mt) { // Support for java_lang_invoke_MethodTypeForm int java_lang_invoke_MethodTypeForm::_vmslots_offset; +int java_lang_invoke_MethodTypeForm::_vmlayout_offset; int java_lang_invoke_MethodTypeForm::_erasedType_offset; int java_lang_invoke_MethodTypeForm::_genericInvoker_offset; @@ -2609,6 +2610,7 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() { klassOop k = SystemDictionary::MethodTypeForm_klass(); if (k != NULL) { compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); + compute_optional_offset(_vmlayout_offset, k, vmSymbols::vmlayout_name(), vmSymbols::object_signature()); compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true); compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true); if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value @@ -2617,9 +2619,31 @@ void java_lang_invoke_MethodTypeForm::compute_offsets() { int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + assert(_vmslots_offset > 0, ""); return mtform->int_field(_vmslots_offset); } +oop java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + assert(_vmlayout_offset > 0, ""); + return mtform->obj_field(_vmlayout_offset); +} + +oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + oop previous = vmlayout(mtform); + if (previous != NULL) { + return previous; // someone else beat us to it + } + HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr(_vmlayout_offset); + OrderAccess::storestore(); // make sure our copy is fully committed + previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous); + if (previous != NULL) { + return previous; // someone else beat us to it + } + return cookie; +} + oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); return mtform->obj_field(_erasedType_offset); diff --git a/hotspot/src/share/vm/classfile/javaClasses.hpp b/hotspot/src/share/vm/classfile/javaClasses.hpp index 5701bc3893c..eb104b84361 100644 --- a/hotspot/src/share/vm/classfile/javaClasses.hpp +++ b/hotspot/src/share/vm/classfile/javaClasses.hpp @@ -949,18 +949,19 @@ class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodH OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive - OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper (NYI) + OP_PRIM_TO_REF = 0x5, // boxes a 
primitive into a wrapper OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg) OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg) OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS) OP_DROP_ARGS = 0x9, // remove one or more argument slots - OP_COLLECT_ARGS = 0xA, // combine one or more arguments into a varargs (NYI) + OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size) - OP_FLYBY = 0xC, // operate first on reified argument list (NYI) - OP_RICOCHET = 0xD, // run an adapter chain on the return value (NYI) + OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result + //OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists CONV_OP_LIMIT = 0xE, // limit of CONV_OP enumeration CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field + CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK @@ -1089,6 +1090,7 @@ class java_lang_invoke_MethodTypeForm: AllStatic { private: static int _vmslots_offset; // number of argument slots needed + static int _vmlayout_offset; // object describing internal calling sequence static int _erasedType_offset; // erasedType = canonical MethodType static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric @@ -1100,8 +1102,12 @@ class java_lang_invoke_MethodTypeForm: AllStatic { static oop erasedType(oop mtform); static oop genericInvoker(oop mtform); + static oop vmlayout(oop mtform); + static oop init_vmlayout(oop mtform, oop cookie); + // Accessors for code generation: static int vmslots_offset_in_bytes() { return _vmslots_offset; } + static int vmlayout_offset_in_bytes() { return _vmlayout_offset; } static int erasedType_offset_in_bytes() { return _erasedType_offset; } static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; } }; diff --git a/hotspot/src/share/vm/classfile/systemDictionary.cpp b/hotspot/src/share/vm/classfile/systemDictionary.cpp index dd185e6bc98..9d870ba1b9d 100644 --- a/hotspot/src/share/vm/classfile/systemDictionary.cpp +++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp @@ -2362,8 +2362,15 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name, spe = invoke_method_table()->find_entry(index, hash, signature, name_id); if (spe == NULL) spe = invoke_method_table()->add_entry(index, hash, signature, name_id); - if (spe->property_oop() == NULL) + if (spe->property_oop() == NULL) { spe->set_property_oop(m()); + // Link m to his method type, if it is suitably generic. 
+ oop mtform = java_lang_invoke_MethodType::form(mt()); + if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) + && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { + java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); + } + } } else { non_cached_result = m; } diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp index 526ba6455ea..fd8601d4b23 100644 --- a/hotspot/src/share/vm/classfile/vmSymbols.hpp +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp @@ -341,6 +341,7 @@ template(vmtarget_name, "vmtarget") \ template(vmentry_name, "vmentry") \ template(vmslots_name, "vmslots") \ + template(vmlayout_name, "vmlayout") \ template(vmindex_name, "vmindex") \ template(vmargslot_name, "vmargslot") \ template(flags_name, "flags") \ @@ -393,6 +394,7 @@ template(void_signature, "V") \ template(byte_array_signature, "[B") \ template(char_array_signature, "[C") \ + template(int_array_signature, "[I") \ template(object_void_signature, "(Ljava/lang/Object;)V") \ template(object_int_signature, "(Ljava/lang/Object;)I") \ template(object_boolean_signature, "(Ljava/lang/Object;)Z") \ diff --git a/hotspot/src/share/vm/code/codeBlob.cpp b/hotspot/src/share/vm/code/codeBlob.cpp index dead37cc336..244c32043f3 100644 --- a/hotspot/src/share/vm/code/codeBlob.cpp +++ b/hotspot/src/share/vm/code/codeBlob.cpp @@ -152,6 +152,32 @@ void CodeBlob::set_oop_maps(OopMapSet* p) { } +void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) { + // Do not hold the CodeCache lock during name formatting. + assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); + + if (stub != NULL) { + char stub_id[256]; + assert(strlen(name1) + strlen(name2) < sizeof(stub_id), ""); + jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2); + if (PrintStubCode) { + tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub); + Disassembler::decode(stub->code_begin(), stub->code_end()); + } + Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); + + if (JvmtiExport::should_post_dynamic_code_generated()) { + const char* stub_name = name2; + if (name2[0] == '\0') stub_name = name1; + JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); + } + } + + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); +} + + void CodeBlob::flush() { if (_oop_maps) { FREE_C_HEAP_ARRAY(unsigned char, _oop_maps); @@ -312,23 +338,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); } - // Do not hold the CodeCache lock during name formatting. 
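// --- Editorial sketch (not part of this changeset) --------------------------
// The init_vmlayout() accessor added earlier in this change publishes the
// vmlayout cookie at most once: whichever thread wins the compare-and-swap
// determines the value everyone sees, and the storestore barrier makes the
// cookie's contents visible before the pointer is.  A standalone restatement
// of that publish-once idiom with std::atomic; publish_once is an invented
// name, and the default sequentially consistent CAS subsumes the barrier.
#include <atomic>
#include <cassert>

// Returns the value that ends up published, whether ours or a rival's.
template <typename T>
T* publish_once(std::atomic<T*>& field, T* candidate) {
  T* expected = nullptr;
  if (field.compare_exchange_strong(expected, candidate)) {
    return candidate;                          // we won the race
  }
  return expected;                             // someone else beat us to it
}

int main() {
  static int cookie = 42;
  std::atomic<int*> slot{nullptr};
  int* first  = publish_once(slot, &cookie);   // installs &cookie
  int* second = publish_once(slot, &cookie);   // already set; returns winner
  assert(first == &cookie && second == first);
  return 0;
}
// --- End editorial sketch ----------------------------------------------------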
- if (stub != NULL) { - char stub_id[256]; - jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub); - Disassembler::decode(stub->code_begin(), stub->code_end()); - } - Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(stub, "RuntimeStub - ", stub_name); return stub; } @@ -340,6 +350,50 @@ void* RuntimeStub::operator new(size_t s, unsigned size) { return p; } +// operator new shared by all singletons: +void* SingletonBlob::operator new(size_t s, unsigned size) { + void* p = CodeCache::allocate(size); + if (!p) fatal("Initial size of CodeCache is too small"); + return p; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of RicochetBlob + +RicochetBlob::RicochetBlob( + CodeBuffer* cb, + int size, + int bounce_offset, + int exception_offset, + int frame_size +) +: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL) +{ + _bounce_offset = bounce_offset; + _exception_offset = exception_offset; +} + + +RicochetBlob* RicochetBlob::create( + CodeBuffer* cb, + int bounce_offset, + int exception_offset, + int frame_size) +{ + RicochetBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(RicochetBlob)); + blob = new (size) RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size); + } + + trace_new_stub(blob, "RicochetBlob"); + + return blob; +} + //---------------------------------------------------------------------------------------------------- // Implementation of DeoptimizationBlob @@ -386,34 +440,12 @@ DeoptimizationBlob* DeoptimizationBlob::create( frame_size); } - // Do not hold the CodeCache lock during name formatting. - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "DeoptimizationBlob"); return blob; } -void* DeoptimizationBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - //---------------------------------------------------------------------------------------------------- // Implementation of UncommonTrapBlob @@ -441,33 +473,12 @@ UncommonTrapBlob* UncommonTrapBlob::create( blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); } - // Do not hold the CodeCache lock during name formatting. 
- if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "UncommonTrapBlob"); return blob; } -void* UncommonTrapBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} #endif // COMPILER2 @@ -498,33 +509,12 @@ ExceptionBlob* ExceptionBlob::create( blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); } - // We do not need to hold the CodeCache lock during name formatting - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "ExceptionBlob"); return blob; } -void* ExceptionBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} #endif // COMPILER2 @@ -554,35 +544,12 @@ SafepointBlob* SafepointBlob::create( blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); } - // We do not need to hold the CodeCache lock during name formatting. 
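// --- Editorial sketch (not part of this changeset) --------------------------
// The near-identical bookkeeping blocks removed above and below (format an
// id, optionally disassemble, notify profilers/JVMTI, update statistics) are
// folded into the single trace_new_stub() helper added to CodeBlob.  A plain
// C++ restatement of that factoring; Stub and notify_profiler are invented.
#include <cstdio>
#include <string>

struct Stub { const char* begin; const char* end; };

void notify_profiler(const std::string& id, const Stub& s) {
  std::printf("registered %s [%p,%p)\n", id.c_str(),
              static_cast<const void*>(s.begin),
              static_cast<const void*>(s.end));
}

// One helper replaces several copy-pasted blocks: callers pass a fixed
// prefix and an optional per-stub suffix, as trace_new_stub() does.
void trace_new_stub(const Stub* stub, const char* name1, const char* name2 = "") {
  if (stub == nullptr) return;                 // creation may have failed
  std::string id = std::string(name1) + name2;
  notify_profiler(id, *stub);
}

int main() {
  char code[16] = {};
  Stub s = { code, code + sizeof(code) };
  trace_new_stub(&s, "RuntimeStub - ", "demo");
  trace_new_stub(&s, "RicochetBlob");          // suffix defaults to ""
  return 0;
}
// --- End editorial sketch ----------------------------------------------------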
- if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "SafepointBlob"); return blob; } -void* SafepointBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - - //---------------------------------------------------------------------------------------------------- // Verification and printing diff --git a/hotspot/src/share/vm/code/codeBlob.hpp b/hotspot/src/share/vm/code/codeBlob.hpp index 61e64882d43..d653794885f 100644 --- a/hotspot/src/share/vm/code/codeBlob.hpp +++ b/hotspot/src/share/vm/code/codeBlob.hpp @@ -35,6 +35,7 @@ // Suptypes are: // nmethod : Compiled Java methods (include method that calls to native code) // RuntimeStub : Call to VM runtime methods +// RicochetBlob : Used for blocking MethodHandle adapters // DeoptimizationBlob : Used for deoptimizatation // ExceptionBlob : Used for stack unrolling // SafepointBlob : Used to handle illegal instruction exceptions @@ -95,12 +96,13 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC { void flush(); // Typing - virtual bool is_buffer_blob() const { return false; } - virtual bool is_nmethod() const { return false; } - virtual bool is_runtime_stub() const { return false; } - virtual bool is_deoptimization_stub() const { return false; } - virtual bool is_uncommon_trap_stub() const { return false; } - virtual bool is_exception_stub() const { return false; } + virtual bool is_buffer_blob() const { return false; } + virtual bool is_nmethod() const { return false; } + virtual bool is_runtime_stub() const { return false; } + virtual bool is_ricochet_stub() const { return false; } + virtual bool is_deoptimization_stub() const { return false; } + virtual bool is_uncommon_trap_stub() const { return false; } + virtual bool is_exception_stub() const { return false; } virtual bool is_safepoint_stub() const { return false; } virtual bool is_adapter_blob() const { return false; } virtual bool is_method_handles_adapter_blob() const { return false; } @@ -182,6 +184,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC { virtual void print_on(outputStream* st) const; virtual void print_value_on(outputStream* st) const; + // Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService. 
+ static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = ""); + // Print the comment associated with offset on stream, if there is one virtual void print_block_comment(outputStream* stream, address block_begin) { intptr_t offset = (intptr_t)(block_begin - code_begin()); @@ -318,7 +323,11 @@ class RuntimeStub: public CodeBlob { class SingletonBlob: public CodeBlob { friend class VMStructs; - public: + + protected: + void* operator new(size_t s, unsigned size); + + public: SingletonBlob( const char* name, CodeBuffer* cb, @@ -340,6 +349,50 @@ class SingletonBlob: public CodeBlob { }; +//---------------------------------------------------------------------------------------------------- +// RicochetBlob +// Holds an arbitrary argument list indefinitely while Java code executes recursively. + +class RicochetBlob: public SingletonBlob { + friend class VMStructs; + private: + + int _bounce_offset; + int _exception_offset; + + // Creation support + RicochetBlob( + CodeBuffer* cb, + int size, + int bounce_offset, + int exception_offset, + int frame_size + ); + + public: + // Creation + static RicochetBlob* create( + CodeBuffer* cb, + int bounce_offset, + int exception_offset, + int frame_size + ); + + // Typing + bool is_ricochet_stub() const { return true; } + + // GC for args + void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } + + address bounce_addr() const { return code_begin() + _bounce_offset; } + address exception_addr() const { return code_begin() + _exception_offset; } + bool returns_to_bounce_addr(address pc) const { + address bounce_pc = bounce_addr(); + return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc); + } +}; + + //---------------------------------------------------------------------------------------------------- // DeoptimizationBlob @@ -363,8 +416,6 @@ class DeoptimizationBlob: public SingletonBlob { int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static DeoptimizationBlob* create( @@ -378,7 +429,6 @@ class DeoptimizationBlob: public SingletonBlob { // Typing bool is_deoptimization_stub() const { return true; } - const DeoptimizationBlob *as_deoptimization_stub() const { return this; } bool exception_address_is_unpack_entry(address pc) const { address unpack_pc = unpack(); return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc); @@ -426,8 +476,6 @@ class UncommonTrapBlob: public SingletonBlob { int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static UncommonTrapBlob* create( @@ -458,8 +506,6 @@ class ExceptionBlob: public SingletonBlob { int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static ExceptionBlob* create( @@ -491,8 +537,6 @@ class SafepointBlob: public SingletonBlob { int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static SafepointBlob* create( diff --git a/hotspot/src/share/vm/code/codeCache.cpp b/hotspot/src/share/vm/code/codeCache.cpp index 2dd42dc06af..8e82aaad056 100644 --- a/hotspot/src/share/vm/code/codeCache.cpp +++ b/hotspot/src/share/vm/code/codeCache.cpp @@ -796,6 +796,7 @@ void CodeCache::print_internals() { int nmethodCount = 0; int runtimeStubCount = 0; int adapterCount = 0; + int ricochetStubCount = 0; int deoptimizationStubCount = 0; int uncommonTrapStubCount = 0; int bufferBlobCount = 0; @@ -840,6 +841,8 @@ void CodeCache::print_internals() { } } else if (cb->is_runtime_stub()) { 
runtimeStubCount++; + } else if (cb->is_ricochet_stub()) { + ricochetStubCount++; } else if (cb->is_deoptimization_stub()) { deoptimizationStubCount++; } else if (cb->is_uncommon_trap_stub()) { @@ -876,6 +879,7 @@ void CodeCache::print_internals() { tty->print_cr("runtime_stubs: %d",runtimeStubCount); tty->print_cr("adapters: %d",adapterCount); tty->print_cr("buffer blobs: %d",bufferBlobCount); + tty->print_cr("ricochet_stubs: %d",ricochetStubCount); tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); tty->print_cr("\nnmethod size distribution (non-zombie java)"); diff --git a/hotspot/src/share/vm/compiler/disassembler.cpp b/hotspot/src/share/vm/compiler/disassembler.cpp index 48813b6cb89..0b4eb6dd638 100644 --- a/hotspot/src/share/vm/compiler/disassembler.cpp +++ b/hotspot/src/share/vm/compiler/disassembler.cpp @@ -283,10 +283,10 @@ void decode_env::print_address(address adr) { st->print("Stub::%s", desc->name()); if (desc->begin() != adr) st->print("%+d 0x%p",adr - desc->begin(), adr); - else if (WizardMode) st->print(" " INTPTR_FORMAT, adr); + else if (WizardMode) st->print(" " PTR_FORMAT, adr); return; } - st->print("Stub:: " INTPTR_FORMAT, adr); + st->print("Stub:: " PTR_FORMAT, adr); return; } @@ -314,8 +314,8 @@ void decode_env::print_address(address adr) { } } - // Fall through to a simple numeral. - st->print(INTPTR_FORMAT, (intptr_t)adr); + // Fall through to a simple (hexadecimal) numeral. + st->print(PTR_FORMAT, adr); } void decode_env::print_insn_labels() { @@ -326,7 +326,7 @@ void decode_env::print_insn_labels() { cb->print_block_comment(st, p); } if (_print_pc) { - st->print(" " INTPTR_FORMAT ": ", (intptr_t) p); + st->print(" " PTR_FORMAT ": ", p); } } @@ -432,7 +432,7 @@ address decode_env::decode_instructions(address start, address end) { void Disassembler::decode(CodeBlob* cb, outputStream* st) { if (!load_library()) return; decode_env env(cb, st); - env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); + env.output()->print_cr("Decoding CodeBlob " PTR_FORMAT, cb); env.decode_instructions(cb->code_begin(), cb->code_end()); } @@ -446,7 +446,7 @@ void Disassembler::decode(address start, address end, outputStream* st) { void Disassembler::decode(nmethod* nm, outputStream* st) { if (!load_library()) return; decode_env env(nm, st); - env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm); + env.output()->print_cr("Decoding compiled method " PTR_FORMAT ":", nm); env.output()->print_cr("Code:"); #ifdef SHARK @@ -478,9 +478,9 @@ void Disassembler::decode(nmethod* nm, outputStream* st) { int offset = 0; for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) { if ((offset % 8) == 0) { - env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p)); + env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p)); } else { - env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT, (intptr_t) p, offset, *((int32_t*) p)); + env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT, p, offset, *((int32_t*) p)); } } } diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp index afc8797ea33..1585c5d8387 100644 --- 
a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp @@ -29,13 +29,14 @@ #include "memory/sharedHeap.hpp" #include "memory/space.inline.hpp" #include "memory/universe.hpp" +#include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp" void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl, + OopsInGenClosure* cl, + CardTableRS* ct, int n_threads) { assert(n_threads > 0, "Error: expected n_threads > 0"); assert((n_threads == 1 && ParallelGCThreads == 0) || @@ -49,14 +50,14 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio lowest_non_clean_base_chunk_index, lowest_non_clean_chunk_size); - int n_strides = n_threads * StridesPerThread; + int n_strides = n_threads * ParGCStridesPerThread; SequentialSubTasksDone* pst = sp->par_seq_tasks(); pst->set_n_threads(n_threads); pst->set_n_tasks(n_strides); int stride = 0; while (!pst->is_task_claimed(/* reference */ stride)) { - process_stride(sp, mr, stride, n_strides, dcto_cl, cl, + process_stride(sp, mr, stride, n_strides, cl, ct, lowest_non_clean, lowest_non_clean_base_chunk_index, lowest_non_clean_chunk_size); @@ -79,13 +80,13 @@ CardTableModRefBS:: process_stride(Space* sp, MemRegion used, jint stride, int n_strides, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl, + OopsInGenClosure* cl, + CardTableRS* ct, jbyte** lowest_non_clean, uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size) { - // We don't have to go downwards here; it wouldn't help anyway, - // because of parallelism. + // We go from higher to lower addresses here; it wouldn't help that much + // because of the strided parallelism pattern used here. // Find the first card address of the first chunk in the stride that is // at least "bottom" of the used region. @@ -98,25 +99,35 @@ process_stride(Space* sp, if ((uintptr_t)stride >= start_chunk_stride_num) { chunk_card_start = (jbyte*)(start_card + (stride - start_chunk_stride_num) * - CardsPerStrideChunk); + ParGCCardsPerStrideChunk); } else { // Go ahead to the next chunk group boundary, then to the requested stride. chunk_card_start = (jbyte*)(start_card + (n_strides - start_chunk_stride_num + stride) * - CardsPerStrideChunk); + ParGCCardsPerStrideChunk); } while (chunk_card_start < end_card) { - // We don't have to go downwards here; it wouldn't help anyway, - // because of parallelism. (We take care with "min_done"; see below.) + // Even though we go from lower to higher addresses below, the + // strided parallelism can interleave the actual processing of the + // dirty pages in various ways. For a specific chunk within this + // stride, we take care to avoid double scanning or missing a card + // by suitably initializing the "min_done" field in process_chunk_boundaries() + // below, together with the dirty region extension accomplished in + // DirtyCardToOopClosure::do_MemRegion(). + jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk; // Invariant: chunk_mr should be fully contained within the "used" region. - jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk; MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start), chunk_card_end >= end_card ? 
used.end() : addr_for(chunk_card_end)); assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)"); assert(used.contains(chunk_mr), "chunk_mr should be subset of used"); + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), + cl->gen_boundary()); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + + // Process the chunk. process_chunk_boundaries(sp, dcto_cl, @@ -126,17 +137,30 @@ process_stride(Space* sp, lowest_non_clean_base_chunk_index, lowest_non_clean_chunk_size); + // We want the LNC array updates above in process_chunk_boundaries + // to be visible before any of the card table value changes as a + // result of the dirty card iteration below. + OrderAccess::storestore(); + // We do not call the non_clean_card_iterate_serial() version because - // we want to clear the cards, and the ClearNoncleanCardWrapper closure - // itself does the work of finding contiguous dirty ranges of cards to - // process (and clear). - cl->do_MemRegion(chunk_mr); + // we want to clear the cards: clear_cl here does the work of finding + // contiguous dirty ranges of cards to process and clear. + clear_cl.do_MemRegion(chunk_mr); // Find the next chunk of the stride. - chunk_card_start += CardsPerStrideChunk * n_strides; + chunk_card_start += ParGCCardsPerStrideChunk * n_strides; } } + +// If you want a talkative process_chunk_boundaries, +// then #define NOISY(x) x +#ifdef NOISY +#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow" +#else +#define NOISY(x) +#endif + void CardTableModRefBS:: process_chunk_boundaries(Space* sp, @@ -147,127 +171,233 @@ process_chunk_boundaries(Space* sp, uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size) { - // We must worry about the chunk boundaries. + // We must worry about non-array objects that cross chunk boundaries, + // because such objects are both precisely and imprecisely marked: + // .. if the head of such an object is dirty, the entire object + // needs to be scanned, under the interpretation that this + // was an imprecise mark + // .. if the head of such an object is not dirty, we can assume + // precise marking and it's efficient to scan just the dirty + // cards. + // In either case, each scanned reference must be scanned precisely + // once so as to avoid cloning of a young referent. For efficiency, + // our closures depend on this property and do not protect against + // double scans. - // First, set our max_to_do: - HeapWord* max_to_do = NULL; uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start()); cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index; - if (chunk_mr.end() < used.end()) { - // This is not the last chunk in the used region. What is the last - // object? - HeapWord* last_block = sp->block_start(chunk_mr.end()); - assert(last_block <= chunk_mr.end(), "In case this property changes."); - if (last_block == chunk_mr.end() - || !sp->block_is_obj(last_block)) { - max_to_do = chunk_mr.end(); + NOISY(tty->print_cr("===========================================================================");) + NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")", + chunk_mr.start(), chunk_mr.end());) + // First, set "our" lowest_non_clean entry, which would be + // used by the thread scanning an adjoining left chunk with + // a non-array object straddling the mutual boundary. + // Find the object that spans our boundary, if one exists. 
+ // first_block is the block possibly straddling our left boundary. + HeapWord* first_block = sp->block_start(chunk_mr.start()); + assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()), + "First chunk should always have a co-initial block"); + // Does the block straddle the chunk's left boundary, and is it + // a non-array object? + if (first_block < chunk_mr.start() // first block straddles left bdry + && sp->block_is_obj(first_block) // first block is an object + && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) + || oop(first_block)->is_typeArray())) { + // Find our least non-clean card, so that a left neighbour + // does not scan an object straddling the mutual boundary + // too far to the right, and attempt to scan a portion of + // that object twice. + jbyte* first_dirty_card = NULL; + jbyte* last_card_of_first_obj = + byte_for(first_block + sp->block_size(first_block) - 1); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); + jbyte* last_card_to_check = + (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, + (intptr_t) last_card_of_first_obj); + // Note that this does not need to go beyond our last card + // if our first object completely straddles this chunk. + for (jbyte* cur = first_card_of_cur_chunk; + cur <= last_card_to_check; cur++) { + jbyte val = *cur; + if (card_will_be_scanned(val)) { + first_dirty_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error"); + } + } + if (first_dirty_card != NULL) { + NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk", + first_dirty_card);) + assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size, + "Bounds error."); + assert(lowest_non_clean[cur_chunk_index] == NULL, + "Write exactly once : value should be stable hereafter for this round"); + lowest_non_clean[cur_chunk_index] = first_dirty_card; + } NOISY(else { + tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL"); + // In the future, we could have this thread look for a non-NULL value to copy from its + // right neighbour (up to the end of the first object). + if (last_card_of_cur_chunk < last_card_of_first_obj) { + tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n" + " might be efficient to get value from right neighbour?"); + } + }) + } else { + // In this case we can help our neighbour by just asking them + // to stop at our first card (even though it may not be dirty). + NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");) + assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; + } + NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT + " which corresponds to the heap address " PTR_FORMAT, + cur_chunk_index, lowest_non_clean[cur_chunk_index], + (lowest_non_clean[cur_chunk_index] != NULL) + ? addr_for(lowest_non_clean[cur_chunk_index]) + : NULL);) + NOISY(tty->print_cr("---------------------------------------------------------------------------");) + + // Next, set our own max_to_do, which will strictly/exclusively bound + // the highest address that we will scan past the right end of our chunk. 
+ HeapWord* max_to_do = NULL; + if (chunk_mr.end() < used.end()) { + // This is not the last chunk in the used region. + // What is our last block? We check the first block of + // the next (right) chunk rather than strictly check our last block + // because it's potentially more efficient to do so. + HeapWord* const last_block = sp->block_start(chunk_mr.end()); + assert(last_block <= chunk_mr.end(), "In case this property changes."); + if ((last_block == chunk_mr.end()) // our last block does not straddle boundary + || !sp->block_is_obj(last_block) // last_block isn't an object + || oop(last_block)->is_objArray() // last_block is an array (precisely marked) + || oop(last_block)->is_typeArray()) { + max_to_do = chunk_mr.end(); + NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n" + " max_to_do left at " PTR_FORMAT, max_to_do);) } else { - // It is an object and starts before the end of the current chunk. + assert(last_block < chunk_mr.end(), "Tautology"); + // It is a non-array object that straddles the right boundary of this chunk. // last_obj_card is the card corresponding to the start of the last object // in the chunk. Note that the last object may not start in // the chunk. - jbyte* last_obj_card = byte_for(last_block); - if (!card_may_have_been_dirty(*last_obj_card)) { - // The card containing the head is not dirty. Any marks in + jbyte* const last_obj_card = byte_for(last_block); + const jbyte val = *last_obj_card; + if (!card_will_be_scanned(val)) { + assert(!card_may_have_been_dirty(val), "Error"); + // The card containing the head is not dirty. Any marks on // subsequent cards still in this chunk must have been made - // precisely; we can cap processing at the end. + // precisely; we can cap processing at the end of our chunk. max_to_do = chunk_mr.end(); + NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n" + " max_to_do left at " PTR_FORMAT, + max_to_do);) } else { // The last object must be considered dirty, and extends onto the // following chunk. Look for a dirty card in that chunk that will // bound our processing. jbyte* limit_card = NULL; - size_t last_block_size = sp->block_size(last_block); - jbyte* last_card_of_last_obj = + const size_t last_block_size = sp->block_size(last_block); + jbyte* const last_card_of_last_obj = byte_for(last_block + last_block_size - 1); - jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end()); + jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end()); // This search potentially goes a long distance looking - // for the next card that will be scanned. For example, - // an object that is an array of primitives will not - // have any cards covering regions interior to the array - // that will need to be scanned. The scan can be terminated - // at the last card of the next chunk. That would leave - // limit_card as NULL and would result in "max_to_do" - // being set with the LNC value or with the end - // of the last block. - jbyte* last_card_of_next_chunk = first_card_of_next_chunk + - CardsPerStrideChunk; - assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) - == CardsPerStrideChunk, "last card of next chunk may be wrong"); - jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj, - last_card_of_next_chunk); + // for the next card that will be scanned, terminating + // at the end of the last_block, if no earlier dirty card + // is found. 
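The max_to_do logic above reduces to a small decision: stop at the chunk end unless a non-array object straddles the right boundary and its head card is dirty (an imprecise mark), in which case scanning may have to continue to the object's end. A hedged sketch of that decision, with placeholder types instead of Space/MemRegion and oop queries:

    struct ChunkBounds { const char* start; const char* end; };

    const char* right_scan_limit(const ChunkBounds& chunk,
                                 const char* last_block,       // block containing chunk.end
                                 bool  is_non_array_object,    // straddler that is not an array
                                 bool  head_card_dirty,        // card of the straddler's head
                                 const char* straddler_end) {  // last_block + its size
      if (last_block == chunk.end || !is_non_array_object) {
        return chunk.end;   // nothing straddles, or the straddler is precisely marked
      }
      if (!head_card_dirty) {
        return chunk.end;   // head card clean: any interior marks were precise
      }
      // Imprecisely marked straddler: scanning may continue up to the object's end,
      // later clipped by the first dirty card in the following chunks / the LNC.
      return straddler_end;
    }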
+ assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk, + "last card of next chunk may be wrong"); for (jbyte* cur = first_card_of_next_chunk; - cur <= last_card_to_check; cur++) { - if (card_will_be_scanned(*cur)) { + cur <= last_card_of_last_obj; cur++) { + const jbyte val = *cur; + if (card_will_be_scanned(val)) { + NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x", + cur, (int)val);) limit_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error: card can't be skipped"); } } - assert(0 <= cur_chunk_index+1 && - cur_chunk_index+1 < lowest_non_clean_chunk_size, - "Bounds error."); - // LNC for the next chunk - jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1]; - if (limit_card == NULL) { - limit_card = lnc_card; - } if (limit_card != NULL) { - if (lnc_card != NULL) { - limit_card = (jbyte*)MIN2((intptr_t)limit_card, - (intptr_t)lnc_card); - } max_to_do = addr_for(limit_card); + assert(limit_card != NULL && max_to_do != NULL, "Error"); + NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT + " max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: " + PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, + limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));) } else { + // The following is a pessimistic value, because it's possible + // that a dirty card on a subsequent chunk has been cleared by + // the time we get to look at it; we'll correct for that further below, + // using the LNC array which records the least non-clean card + // before cards were cleared in a particular chunk. + limit_card = last_card_of_last_obj; max_to_do = last_block + last_block_size; + assert(limit_card != NULL && max_to_do != NULL, "Error"); + NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n" + " Setting limit_card to " PTR_FORMAT + " and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, + limit_card, last_block, last_block_size, max_to_do);) } + assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size, + "Bounds error."); + // It is possible that a dirty card for the last object may have been + // cleared before we had a chance to examine it. In that case, the value + // will have been logged in the LNC for that chunk. + // We need to examine as many chunks to the right as this object + // covers. 
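The loop described above consults the LNC entries of every chunk the straddling object covers, because a sibling worker may already have cleared the dirty card that the optimistic search was looking for. A small stand-alone sketch of that clipping step (plain pointers into one card array instead of jbyte*):

    #include <cstddef>

    const unsigned char* clip_by_lnc(const unsigned char* const* lowest_non_clean,
                                     std::size_t cur_chunk_index,
                                     std::size_t last_chunk_index_to_check,
                                     const unsigned char* limit_card) {
      for (std::size_t i = cur_chunk_index + 1; i <= last_chunk_index_to_check; ++i) {
        const unsigned char* lnc_card = lowest_non_clean[i];
        if (lnc_card != nullptr) {
          if (lnc_card <= limit_card) {
            limit_card = lnc_card;   // a card that was dirty before it got cleared
          }
          break;                     // the first non-NULL entry settles the question
        }
      }
      return limit_card;             // caller derives max_to_do via addr_for(limit_card)
    }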
+ const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) + - lowest_non_clean_base_chunk_index; + DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) + - lowest_non_clean_base_chunk_index;) + assert(last_chunk_index_to_check <= last_chunk_index, + err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT + " exceeds last_chunk_index " INTPTR_FORMAT, + last_chunk_index_to_check, last_chunk_index)); + for (uintptr_t lnc_index = cur_chunk_index + 1; + lnc_index <= last_chunk_index_to_check; + lnc_index++) { + jbyte* lnc_card = lowest_non_clean[lnc_index]; + if (lnc_card != NULL) { + // we can stop at the first non-NULL entry we find + if (lnc_card <= limit_card) { + NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT, + " max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT, + lnc_card, limit_card, addr_for(lnc_card), max_to_do);) + limit_card = lnc_card; + max_to_do = addr_for(limit_card); + assert(limit_card != NULL && max_to_do != NULL, "Error"); + } + // In any case, we break now + break; + } // else continue to look for a non-NULL entry if any + } + assert(limit_card != NULL && max_to_do != NULL, "Error"); } + assert(max_to_do != NULL, "OOPS 1 !"); } - assert(max_to_do != NULL, "OOPS!"); + assert(max_to_do != NULL, "OOPS 2!"); } else { max_to_do = used.end(); + NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n" + " max_to_do left at " PTR_FORMAT, + max_to_do);) } + assert(max_to_do != NULL, "OOPS 3!"); // Now we can set the closure we're using so it doesn't to beyond // max_to_do. dcto_cl->set_min_done(max_to_do); #ifndef PRODUCT dcto_cl->set_last_bottom(max_to_do); #endif - - // Now we set *our" lowest_non_clean entry. - // Find the object that spans our boundary, if one exists. - // Nothing to do on the first chunk. - if (chunk_mr.start() > used.start()) { - // first_block is the block possibly spanning the chunk start - HeapWord* first_block = sp->block_start(chunk_mr.start()); - // Does the block span the start of the chunk and is it - // an object? - if (first_block < chunk_mr.start() && - sp->block_is_obj(first_block)) { - jbyte* first_dirty_card = NULL; - jbyte* last_card_of_first_obj = - byte_for(first_block + sp->block_size(first_block) - 1); - jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); - jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); - jbyte* last_card_to_check = - (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, - (intptr_t) last_card_of_first_obj); - for (jbyte* cur = first_card_of_cur_chunk; - cur <= last_card_to_check; cur++) { - if (card_will_be_scanned(*cur)) { - first_dirty_card = cur; break; - } - } - if (first_dirty_card != NULL) { - assert(0 <= cur_chunk_index && - cur_chunk_index < lowest_non_clean_chunk_size, - "Bounds error."); - lowest_non_clean[cur_chunk_index] = first_dirty_card; - } - } - } + NOISY(tty->print_cr("===========================================================================\n");) } +#undef NOISY + void CardTableModRefBS:: get_LNC_array_for_space(Space* sp, @@ -283,8 +413,8 @@ get_LNC_array_for_space(Space* sp, // LNC array for the covered region. Any later expansion can't affect // the used_at_save_marks region. // (I observed a bug in which the first thread to execute this would - // resize, and then it would cause "expand_and_allocates" that would - // Increase the number of chunks in the covered region. 
Then a second + // resize, and then it would cause "expand_and_allocate" that would + // increase the number of chunks in the covered region. Then a second // thread would come and execute this, see that the size didn't match, // and free and allocate again. So the first thread would be using a // freed "_lowest_non_clean" array.) diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp index c9c50b3f6a3..00177bb75d2 100644 --- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp @@ -77,7 +77,23 @@ inline void ParScanClosure::do_oop_work(T* p, if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { - assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); +#ifndef PRODUCT + if (_g->to()->is_in_reserved(obj)) { + tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p); + GenCollectedHeap* gch = (GenCollectedHeap*)Universe::heap(); + Space* sp = gch->space_containing(p); + oop obj = oop(sp->block_start(p)); + assert((HeapWord*)obj < (HeapWord*)p, "Error"); + tty->print_cr("Object: " PTR_FORMAT, obj); + tty->print_cr("-------"); + obj->print(); + tty->print_cr("-----"); + tty->print_cr("Heap:"); + tty->print_cr("-----"); + gch->print(); + ShouldNotReachHere(); + } +#endif // OK, we need to ensure that it is copied. // We read the klass and mark in this order, so that we can reliably // get the size of the object: if the mark we read is not a diff --git a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp index c407fbaba66..a792812954f 100644 --- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp +++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp @@ -175,19 +175,32 @@ class AbstractInterpreter: AllStatic { int temps, int popframe_args, int monitors, + int caller_actual_parameters, int callee_params, int callee_locals, - bool is_top_frame); + bool is_top_frame) { + return layout_activation(method, + temps, + popframe_args, + monitors, + caller_actual_parameters, + callee_params, + callee_locals, + (frame*)NULL, + (frame*)NULL, + is_top_frame); + } static int layout_activation(methodOop method, - int temps, - int popframe_args, - int monitors, - int callee_params, - int callee_locals, - frame* caller, - frame* interpreter_frame, - bool is_top_frame); + int temps, + int popframe_args, + int monitors, + int caller_actual_parameters, + int callee_params, + int callee_locals, + frame* caller, + frame* interpreter_frame, + bool is_top_frame); // Runtime support static bool is_not_reached( methodHandle method, int bci); diff --git a/hotspot/src/share/vm/memory/blockOffsetTable.cpp b/hotspot/src/share/vm/memory/blockOffsetTable.cpp index 7caafeeb977..210f744a1f0 100644 --- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp +++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp @@ -541,20 +541,33 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe( // to go back by. 
size_t n_cards_back = entry_to_cards_back(offset); q -= (N_words * n_cards_back); - assert(q >= _sp->bottom(), "Went below bottom!"); + assert(q >= _sp->bottom(), + err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, + q, _sp->bottom())); + assert(q < _sp->end(), + err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, + q, _sp->end())); index -= n_cards_back; offset = _array->offset_array(index); } assert(offset < N_words, "offset too large"); index--; q -= offset; + assert(q >= _sp->bottom(), + err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, + q, _sp->bottom())); + assert(q < _sp->end(), + err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, + q, _sp->end())); HeapWord* n = q; while (n <= addr) { debug_only(HeapWord* last = q); // for debugging q = n; n += _sp->block_size(n); - assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n)); + assert(n > q, + err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")", + n, last, _sp->bottom(), _sp->end())); } assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr)); assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n)); diff --git a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp index 356a77adeeb..c6645015dea 100644 --- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp +++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp @@ -455,25 +455,29 @@ bool CardTableModRefBS::mark_card_deferred(size_t card_index) { return true; } - void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl) { + OopsInGenClosure* cl, + CardTableRS* ct) { if (!mr.is_empty()) { int n_threads = SharedHeap::heap()->n_par_threads(); if (n_threads > 0) { #ifndef SERIALGC - non_clean_card_iterate_parallel_work(sp, mr, dcto_cl, cl, n_threads); + non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); #else // SERIALGC fatal("Parallel gc not supported here."); #endif // SERIALGC } else { // We do not call the non_clean_card_iterate_serial() version below because // we want to clear the cards (which non_clean_card_iterate_serial() does not - // do for us), and the ClearNoncleanCardWrapper closure itself does the work - // of finding contiguous dirty ranges of cards to process (and clear). - cl->do_MemRegion(mr); + // do for us): clear_cl here does the work of finding contiguous dirty ranges + // of cards to process and clear. 
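The strengthened asserts in block_start_unsafe guard the invariant the forward walk depends on: every block has a positive size, so n advances strictly and the loop terminates with q <= addr < n. A minimal, self-contained version of that forward walk, with block sizes supplied by a callback rather than the Space:

    #include <cassert>

    // block_size is a caller-supplied callback standing in for _sp->block_size().
    template <typename SizeFn>
    const char* forward_block_walk(const char* q, const char* addr, SizeFn block_size) {
      const char* n = q;
      while (n <= addr) {
        q = n;
        n += block_size(n);
        assert(n > q && "block sizes must be positive or the walk would loop forever");
      }
      // Post-condition matching the asserts in the patch: q <= addr < n,
      // so q is the start of the block containing addr.
      return q;
    }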
+ + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), + cl->gen_boundary()); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + + clear_cl.do_MemRegion(mr); } } } diff --git a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp index a8d48515ce4..8ed4e03d979 100644 --- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp +++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp @@ -150,7 +150,9 @@ class CardTableModRefBS: public ModRefBarrierSet { // Mapping from address to card marking array entry jbyte* byte_for(const void* p) const { assert(_whole_heap.contains(p), - "out of bounds access to card marking array"); + err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + p, _whole_heap.start(), _whole_heap.end())); jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift]; assert(result >= _byte_map && result < _byte_map + _byte_map_size, "out of bounds accessor for card marking array"); @@ -173,18 +175,17 @@ class CardTableModRefBS: public ModRefBarrierSet { // A variant of the above that will operate in a parallel mode if // worker threads are available, and clear the dirty cards as it // processes them. - // ClearNoncleanCardWrapper cl must wrap the DirtyCardToOopClosure dcto_cl, - // which may itself be modified by the method. + // XXX ??? MemRegionClosure above vs OopsInGenClosure below XXX + // XXX some new_dcto_cl's take OopClosure's, plus as above there are + // some MemRegionClosures. Clean this up everywhere. XXX void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl); + OopsInGenClosure* cl, CardTableRS* ct); private: // Work method used to implement non_clean_card_iterate_possibly_parallel() // above in the parallel case. void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl, + OopsInGenClosure* cl, CardTableRS* ct, int n_threads); protected: @@ -198,11 +199,6 @@ class CardTableModRefBS: public ModRefBarrierSet { // *** Support for parallel card scanning. - enum SomeConstantsForParallelism { - StridesPerThread = 2, - CardsPerStrideChunk = 256 - }; - // This is an array, one element per covered region of the card table. // Each entry is itself an array, with one element per chunk in the // covered region. Each entry of these arrays is the lowest non-clean @@ -235,7 +231,7 @@ class CardTableModRefBS: public ModRefBarrierSet { // covers the given address. 
uintptr_t addr_to_chunk_index(const void* addr) { uintptr_t card = (uintptr_t) byte_for(addr); - return card / CardsPerStrideChunk; + return card / ParGCCardsPerStrideChunk; } // Apply cl, which must either itself apply dcto_cl or be dcto_cl, @@ -243,8 +239,8 @@ class CardTableModRefBS: public ModRefBarrierSet { void process_stride(Space* sp, MemRegion used, jint stride, int n_strides, - DirtyCardToOopClosure* dcto_cl, - ClearNoncleanCardWrapper* cl, + OopsInGenClosure* cl, + CardTableRS* ct, jbyte** lowest_non_clean, uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size); @@ -457,14 +453,18 @@ public: size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); HeapWord* result = (HeapWord*) (delta << card_shift); assert(_whole_heap.contains(result), - "out of bounds accessor from card marking array"); + err_msg("Returning result = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + result, _whole_heap.start(), _whole_heap.end())); return result; } // Mapping from address to card marking array index. size_t index_for(void* p) { assert(_whole_heap.contains(p), - "out of bounds access to card marking array"); + err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + p, _whole_heap.start(), _whole_heap.end())); return byte_for(p) - _byte_map; } @@ -482,7 +482,7 @@ public: void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; static size_t par_chunk_heapword_alignment() { - return CardsPerStrideChunk * card_size_in_words; + return ParGCCardsPerStrideChunk * card_size_in_words; } }; diff --git a/hotspot/src/share/vm/memory/cardTableRS.cpp b/hotspot/src/share/vm/memory/cardTableRS.cpp index 551ba7eb8a4..4b47da783a1 100644 --- a/hotspot/src/share/vm/memory/cardTableRS.cpp +++ b/hotspot/src/share/vm/memory/cardTableRS.cpp @@ -162,7 +162,7 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { } ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( - MemRegionClosure* dirty_card_closure, CardTableRS* ct) : + DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) : _dirty_card_closure(dirty_card_closure), _ct(ct) { _is_par = (SharedHeap::heap()->n_par_threads() > 0); } @@ -246,10 +246,6 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { void CardTableRS::younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl) { - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(), - cl->gen_boundary()); - ClearNoncleanCardWrapper clear_cl(dcto_cl, this); - const MemRegion urasm = sp->used_region_at_save_marks(); #ifdef ASSERT // Convert the assertion check to a warning if we are running @@ -275,10 +271,10 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp, if (!urasm.equals(urasm2)) { warning("CMS+ParNew: Flickering used_region_at_save_marks()!!"); } + ShouldNotReachHere(); } #endif - _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, - dcto_cl, &clear_cl); + _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this); } void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) { diff --git a/hotspot/src/share/vm/memory/cardTableRS.hpp b/hotspot/src/share/vm/memory/cardTableRS.hpp index 72c5dacf83d..a15b85f74a5 100644 --- a/hotspot/src/share/vm/memory/cardTableRS.hpp +++ b/hotspot/src/share/vm/memory/cardTableRS.hpp @@ -31,7 +31,6 @@ class Space; class OopsInGenClosure; -class DirtyCardToOopClosure; // This kind of "GenRemSet" uses a card table 
both as shared data structure // for a mod ref barrier set and for the rem set information. @@ -167,7 +166,7 @@ public: }; class ClearNoncleanCardWrapper: public MemRegionClosure { - MemRegionClosure* _dirty_card_closure; + DirtyCardToOopClosure* _dirty_card_closure; CardTableRS* _ct; bool _is_par; private: @@ -179,7 +178,7 @@ private: inline bool clear_card_parallel(jbyte* entry); public: - ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure, CardTableRS* ct); + ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct); void do_MemRegion(MemRegion mr); }; diff --git a/hotspot/src/share/vm/memory/space.cpp b/hotspot/src/share/vm/memory/space.cpp index 1971332cd06..ff91ba30cfe 100644 --- a/hotspot/src/share/vm/memory/space.cpp +++ b/hotspot/src/share/vm/memory/space.cpp @@ -97,6 +97,14 @@ void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, } } +// We get called with "mr" representing the dirty region +// that we want to process. Because of imprecise marking, +// we may need to extend the incoming "mr" to the right, +// and scan more. However, because we may already have +// scanned some of that extended region, we may need to +// trim its right-end back some so we do not scan what +// we (or another worker thread) may already have scanned +// or planning to scan. void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { // Some collectors need to do special things whenever their dirty @@ -148,7 +156,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { // e.g. the dirty card region is entirely in a now free object // -- something that could happen with a concurrent sweeper. bottom = MIN2(bottom, top); - mr = MemRegion(bottom, top); + MemRegion extended_mr = MemRegion(bottom, top); assert(bottom <= top && (_precision != CardTableModRefBS::ObjHeadPreciseArray || _min_done == NULL || @@ -156,8 +164,8 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { "overlap!"); // Walk the region if it is not empty; otherwise there is nothing to do. 
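The new comment and the extended_mr change in space.cpp describe an extend-then-trim step: the incoming dirty region may be widened to the right to cover a straddling object, but must not be widened into territory that this closure (or another worker) has already scanned or will scan. A loose sketch of that idea, using const char* in place of HeapWord* and ignoring the block-boundary details:

    #include <algorithm>

    struct Region {
      const char* start;
      const char* end;
      bool is_empty() const { return start >= end; }
    };

    Region extend_and_trim(Region dirty, const char* straddling_obj_end, const char* min_done) {
      const char* top    = std::max(dirty.end, straddling_obj_end);  // extend to the right
      const char* bottom = dirty.start;
      if (min_done != nullptr) {
        top = std::min(top, min_done);   // never walk into territory already covered
      }
      bottom = std::min(bottom, top);    // the region may collapse to empty
      return Region{bottom, top};
    }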
- if (!mr.is_empty()) { - walk_mem_region(mr, bottom_obj, top); + if (!extended_mr.is_empty()) { + walk_mem_region(extended_mr, bottom_obj, top); } // An idempotent closure might be applied in any order, so we don't diff --git a/hotspot/src/share/vm/oops/constantPoolKlass.cpp b/hotspot/src/share/vm/oops/constantPoolKlass.cpp index 627a2017ebe..a5ca5cdfc7f 100644 --- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp +++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp @@ -285,10 +285,9 @@ int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPool(), "should be constant pool"); constantPoolOop cp = (constantPoolOop) obj; - if (cp->tags() != NULL && - (!JavaObjectsInPerm || (EnableInvokeDynamic && cp->has_pseudo_string()))) { + if (cp->tags() != NULL) { for (int i = 1; i < cp->length(); ++i) { - if (cp->tag_at(i).is_string()) { + if (cp->is_pointer_entry(i)) { oop* base = cp->obj_at_addr_raw(i); if (PSScavenge::should_scavenge(base)) { pm->claim_or_forward_depth(base); @@ -342,6 +341,11 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) { anObj->print_value_on(st); st->print(" {0x%lx}", (address)anObj); break; + case JVM_CONSTANT_Object : + anObj = cp->object_at(index); + anObj->print_value_on(st); + st->print(" {0x%lx}", (address)anObj); + break; case JVM_CONSTANT_Integer : st->print("%d", cp->int_at(index)); break; @@ -432,23 +436,21 @@ void constantPoolKlass::oop_verify_on(oop obj, outputStream* st) { guarantee(cp->is_perm(), "should be in permspace"); if (!cp->partially_loaded()) { for (int i = 0; i< cp->length(); i++) { + constantTag tag = cp->tag_at(i); CPSlot entry = cp->slot_at(i); - if (cp->tag_at(i).is_klass()) { + if (tag.is_klass()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_klass(), "should be klass"); } - } - if (cp->tag_at(i).is_unresolved_klass()) { + } else if (tag.is_unresolved_klass()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_klass(), "should be klass"); } - } - if (cp->tag_at(i).is_symbol()) { + } else if (tag.is_symbol()) { guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count"); - } - if (cp->tag_at(i).is_unresolved_string()) { + } else if (tag.is_unresolved_string()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_instance(), "should be instance"); @@ -456,8 +458,7 @@ void constantPoolKlass::oop_verify_on(oop obj, outputStream* st) { else { guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count"); } - } - if (cp->tag_at(i).is_string()) { + } else if (tag.is_string()) { if (!cp->has_pseudo_string()) { if (entry.is_oop()) { guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(), @@ -467,8 +468,11 @@ void constantPoolKlass::oop_verify_on(oop obj, outputStream* st) { } else { // can be non-perm, can be non-instance (array) } + } else if (tag.is_object()) { + assert(entry.get_oop()->is_oop(), "should be some valid oop"); + } else { + assert(!cp->is_pointer_entry(i), "unhandled oop type in constantPoolKlass::verify_on"); } - // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc. 
} guarantee(cp->tags()->is_perm(), "should be in permspace"); guarantee(cp->tags()->is_typeArray(), "should be type array"); diff --git a/hotspot/src/share/vm/opto/bytecodeInfo.cpp b/hotspot/src/share/vm/opto/bytecodeInfo.cpp index 55844906bfa..d3caa640b27 100644 --- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp +++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp @@ -89,7 +89,7 @@ static bool is_init_with_ea(ciMethod* callee_method, } // positive filter: should send be inlined? returns NULL, if yes, or rejection msg -const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { +const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { // Allows targeted inlining if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); @@ -102,8 +102,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m // positive filter: should send be inlined? returns NULL (--> yes) // or rejection msg - int max_size = C->max_inline_size(); - int size = callee_method->code_size(); + int size = callee_method->code_size(); // Check for too many throws (and not too huge) if(callee_method->interpreter_throwout_count() > InlineThrowCount && @@ -120,18 +119,36 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m return NULL; // size and frequency are represented in a new way } + int default_max_inline_size = C->max_inline_size(); + int inline_small_code_size = InlineSmallCode / 4; + int max_inline_size = default_max_inline_size; + int call_site_count = method()->scale_count(profile.count()); int invoke_count = method()->interpreter_invocation_count(); - assert( invoke_count != 0, "Require invokation count greater than zero"); - int freq = call_site_count/invoke_count; + + // Bytecoded method handle adapters do not have interpreter + // profiling data but only made up MDO data. Get the counter from + // there. + if (caller_method->is_method_handle_adapter()) { + assert(method()->method_data_or_null(), "must have an MDO"); + ciMethodData* mdo = method()->method_data(); + ciProfileData* mha_profile = mdo->bci_to_data(caller_bci); + assert(mha_profile, "must exist"); + CounterData* cd = mha_profile->as_CounterData(); + invoke_count = cd->count(); + call_site_count = invoke_count; // use the same value + } + + assert(invoke_count != 0, "require invocation count greater than zero"); + int freq = call_site_count / invoke_count; // bump the max size if the call is frequent if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount) || is_init_with_ea(callee_method, caller_method, C)) { - max_size = C->freq_inline_size(); - if (size <= max_size && TraceFrequencyInlining) { + max_inline_size = C->freq_inline_size(); + if (size <= max_inline_size && TraceFrequencyInlining) { CompileTask::print_inline_indent(inline_depth()); tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count); CompileTask::print_inline_indent(inline_depth()); @@ -141,11 +158,11 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m } else { // Not hot. Check for medium-sized pre-existing nmethod at cold sites. 
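The frequency handling in should_inline() boils down to: compute freq = call_site_count / invoke_count, and if the site is frequent (or an escape-analysis-interesting constructor) allow a larger callee by switching from max_inline_size() to freq_inline_size(). A hedged sketch of that size filter, with a made-up threshold struct in place of the Compile/ciMethod accessors; the real code also asserts invoke_count != 0, while the sketch merely guards against it:

    struct InlineLimits {
      int default_max_size;   // stands in for C->max_inline_size()
      int freq_max_size;      // stands in for C->freq_inline_size()
      int freq_ratio;         // stands in for InlineFrequencyRatio
      int freq_count;         // stands in for InlineFrequencyCount
    };

    // Returns nullptr when the callee passes the size filter, else a rejection message.
    const char* size_filter(int callee_size, int call_site_count, int invoke_count,
                            const InlineLimits& lim) {
      int max_size = lim.default_max_size;
      int freq = (invoke_count != 0) ? (call_site_count / invoke_count) : 0;
      if (freq >= lim.freq_ratio || call_site_count >= lim.freq_count) {
        max_size = lim.freq_max_size;   // frequent call site: tolerate a bigger callee
      }
      if (callee_size > max_size) {
        return (max_size > lim.default_max_size) ? "hot method too big" : "too big";
      }
      return nullptr;                   // size-wise, ok to inline
    }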
if (callee_method->has_compiled_code() && - callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4) + callee_method->instructions_size(CompLevel_full_optimization) > inline_small_code_size) return "already compiled into a medium method"; } - if (size > max_size) { - if (max_size > C->max_inline_size()) + if (size > max_inline_size) { + if (max_inline_size > default_max_inline_size) return "hot method too big"; return "too big"; } @@ -154,7 +171,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m // negative filter: should send NOT be inlined? returns NULL, ok to inline, or rejection msg -const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { +const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg if (!UseOldInlining) { const char* fail = NULL; @@ -269,14 +286,13 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_ } const char *msg = NULL; - if ((msg = shouldInline(callee_method, caller_method, caller_bci, - profile, wci_result)) != NULL) { + msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result); + if (msg != NULL) return msg; - } - if ((msg = shouldNotInline(callee_method, caller_method, - wci_result)) != NULL) { + + msg = should_not_inline(callee_method, caller_method, wci_result); + if (msg != NULL) return msg; - } if (InlineAccessors && callee_method->is_accessor()) { // accessor methods are not subject to any of the following limits. @@ -492,9 +508,8 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J new_depth_adjust -= 1; // don't count method handle calls from java.lang.invoke implem } if (new_depth_adjust != 0 && PrintInlining) { - stringStream nm1; caller_jvms->method()->print_name(&nm1); - stringStream nm2; callee_method->print_name(&nm2); - tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base()); + CompileTask::print_inline_indent(inline_depth()); + tty->print_cr(" \\-> discounting inline depth"); } if (new_depth_adjust != 0 && C->log()) { int id1 = C->log()->identify(caller_jvms->method()); diff --git a/hotspot/src/share/vm/opto/doCall.cpp b/hotspot/src/share/vm/opto/doCall.cpp index de066e992a3..3c97b3d6a20 100644 --- a/hotspot/src/share/vm/opto/doCall.cpp +++ b/hotspot/src/share/vm/opto/doCall.cpp @@ -62,7 +62,10 @@ void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_met CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) { - CallGenerator* cg; + CallGenerator* cg; + ciMethod* caller = jvms->method(); + int bci = jvms->bci(); + Bytecodes::Code bytecode = caller->java_code_at_bci(bci); guarantee(call_method != NULL, "failed method resolution"); // Dtrace currently doesn't work unless all calls are vanilla @@ -73,7 +76,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, // Note: When we get profiling during stage-1 compiles, we want to pull // from more specific profile data which pertains to this inlining. // Right now, ignore the information in jvms->caller(), and do method[bci]. 
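After the renames, try_to_inline() reads as two sequential filters whose first non-NULL rejection message wins; NULL means the call may be inlined. The control flow, reduced to a toy function with the filter results passed in:

    // nullptr means "no objection"; the first non-null message wins, as in try_to_inline().
    const char* decide_inline(const char* positive_filter_msg,     // from should_inline()
                              const char* negative_filter_msg) {   // from should_not_inline()
      if (positive_filter_msg != nullptr) return positive_filter_msg;
      if (negative_filter_msg != nullptr) return negative_filter_msg;
      return nullptr;  // ok to inline
    }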
- ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci()); + ciCallProfile profile = caller->call_profile_at_bci(bci); // See how many times this site has been invoked. int site_count = profile.count(); @@ -116,7 +119,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, // MethodHandle.invoke* are native methods which obviously don't // have bytecodes and so normal inlining fails. if (call_method->is_method_handle_invoke()) { - if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) { + if (bytecode != Bytecodes::_invokedynamic) { GraphKit kit(jvms); Node* n = kit.argument(0); @@ -125,17 +128,19 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, ciObject* const_oop = oop_ptr->const_oop(); ciMethodHandle* method_handle = const_oop->as_method_handle(); - // Set the actually called method to have access to the class - // and signature in the MethodHandleCompiler. + // Set the callee to have access to the class and signature in + // the MethodHandleCompiler. method_handle->set_callee(call_method); + method_handle->set_caller(caller); + method_handle->set_call_profile(&profile); // Get an adapter for the MethodHandle. ciMethod* target_method = method_handle->get_method_handle_adapter(); - CallGenerator* hit_cg = NULL; - if (target_method != NULL) - hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); - if (hit_cg != NULL && hit_cg->is_inline()) - return hit_cg; + if (target_method != NULL) { + CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + if (hit_cg != NULL && hit_cg->is_inline()) + return hit_cg; + } } return CallGenerator::for_direct_call(call_method); @@ -148,18 +153,20 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, ciCallSite* call_site = str.get_call_site(); ciMethodHandle* method_handle = call_site->get_target(); - // Set the actually called method to have access to the class - // and signature in the MethodHandleCompiler. + // Set the callee to have access to the class and signature in + // the MethodHandleCompiler. method_handle->set_callee(call_method); + method_handle->set_caller(caller); + method_handle->set_call_profile(&profile); // Get an adapter for the MethodHandle. ciMethod* target_method = method_handle->get_invokedynamic_adapter(); - CallGenerator* hit_cg = NULL; - if (target_method != NULL) - hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); - if (hit_cg != NULL && hit_cg->is_inline()) { - CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method); - return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor); + if (target_method != NULL) { + CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + if (hit_cg != NULL && hit_cg->is_inline()) { + CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method); + return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor); + } } // If something failed, generate a normal dynamic call. 
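The restructured invokedynamic path only builds the predicted (hit/miss) call when an adapter was found and its generator turns out to be inlineable; otherwise it falls back to a plain dynamic call. A compact sketch of that shape, where CallGen and the two factories are invented stand-ins for CallGenerator, for_dynamic_call and for_predicted_dynamic_call:

    struct CallGen { bool inline_ok; };

    static CallGen g_plain_dynamic  = { false };
    static CallGen g_predicted_call = { true  };

    CallGen* for_dynamic_call()                      { return &g_plain_dynamic; }
    CallGen* for_predicted_call(CallGen*, CallGen*)  { return &g_predicted_call; }

    CallGen* generator_for_invokedynamic(CallGen* adapter_cg /* may be nullptr */) {
      if (adapter_cg != nullptr && adapter_cg->inline_ok) {
        CallGen* miss_cg = for_dynamic_call();            // taken when the prediction misses
        return for_predicted_call(adapter_cg, miss_cg);   // inline the adapter on the hit path
      }
      return for_dynamic_call();   // something failed: plain dynamic call
    }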
diff --git a/hotspot/src/share/vm/opto/loopTransform.cpp b/hotspot/src/share/vm/opto/loopTransform.cpp index a5d40ba2597..f720b79ce63 100644 --- a/hotspot/src/share/vm/opto/loopTransform.cpp +++ b/hotspot/src/share/vm/opto/loopTransform.cpp @@ -1230,7 +1230,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad set_ctrl(new_limit, C->root()); } else { // Limit is not constant. - { + if (loop_head->unrolled_count() == 1) { // only for first unroll // Separate limit by Opaque node in case it is an incremented // variable from previous loop to avoid using pre-incremented // value which could increase register pressure. diff --git a/hotspot/src/share/vm/opto/parse.hpp b/hotspot/src/share/vm/opto/parse.hpp index 85416bb756c..9237e659886 100644 --- a/hotspot/src/share/vm/opto/parse.hpp +++ b/hotspot/src/share/vm/opto/parse.hpp @@ -68,8 +68,8 @@ protected: JVMState* caller_jvms, int caller_bci); const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); - const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; - const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; + const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; + const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const; InlineTree *caller_tree() const { return _caller_tree; } diff --git a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp index de5803b69ee..a7caebe4b44 100644 --- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp +++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp @@ -3158,6 +3158,9 @@ inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread, if (fr->is_entry_frame()) { last_entry_frame = fr; } + if (fr->is_ricochet_frame()) { + fr->oops_ricochet_do(blk, vf->register_map()); + } } vf = vf->sender(); diff --git a/hotspot/src/share/vm/prims/methodHandleWalk.cpp b/hotspot/src/share/vm/prims/methodHandleWalk.cpp index 936d1e73952..cc8f8b539df 100644 --- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp +++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp @@ -31,6 +31,11 @@ * JSR 292 reference implementation: method handle structure analysis */ +#ifdef PRODUCT +#define print_method_handle(mh) {} +#else //PRODUCT +extern "C" void print_method_handle(oop mh); +#endif //PRODUCT // ----------------------------------------------------------------------------- // MethodHandleChain @@ -206,8 +211,10 @@ MethodHandleWalker::walk(TRAPS) { lose("bad argument index", CHECK_(empty)); } + bool retain_original_args = false; // used by fold/collect logic + // perform the adapter action - switch (chain().adapter_conversion_op()) { + switch (conv_op) { case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: // No changes to arguments; pass the bits through. break; @@ -216,51 +223,36 @@ MethodHandleWalker::walk(TRAPS) { // To keep the verifier happy, emit bitwise ("raw") conversions as needed. // See MethodHandles::same_basic_type_for_arguments for allowed conversions. 
Handle incoming_mtype(THREAD, chain().method_type_oop()); - oop outgoing_mh_oop = chain().vmtarget_oop(); - if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) - lose("outgoing target not a MethodHandle", CHECK_(empty)); - Handle outgoing_mtype(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); - outgoing_mh_oop = NULL; // GC safety + Handle outgoing_mtype; + { + oop outgoing_mh_oop = chain().vmtarget_oop(); + if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) + lose("outgoing target not a MethodHandle", CHECK_(empty)); + outgoing_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); + } int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype()); if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype())) lose("incoming and outgoing parameter count do not agree", CHECK_(empty)); + // Argument types. for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) { SlotState* arg_state = slot_state(slot); if (arg_state->_type == T_VOID) continue; - ArgToken arg = _outgoing.at(slot)._arg; - - klassOop in_klass = NULL; - klassOop out_klass = NULL; - BasicType inpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &in_klass); - BasicType outpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &out_klass); - assert(inpbt == arg.basic_type(), "sanity"); - - if (inpbt != outpbt) { - vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(inpbt, outpbt); - if (iid == vmIntrinsics::_none) { - lose("no raw conversion method", CHECK_(empty)); - } - ArgToken arglist[2]; - arglist[0] = arg; // outgoing 'this' - arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); - change_argument(inpbt, slot, outpbt, arg); - } + klassOop src_klass = NULL; + klassOop dst_klass = NULL; + BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &src_klass); + BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &dst_klass); + retype_raw_argument_type(src, dst, slot, CHECK_(empty)); i++; // We need to skip void slots at the top of the loop. } - BasicType inrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); - BasicType outrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); - if (inrbt != outrbt) { - if (inrbt == T_INT && outrbt == T_VOID) { - // See comments in MethodHandles::same_basic_type_for_arguments. - } else { - assert(false, "IMPLEMENT ME"); - lose("no raw conversion method", CHECK_(empty)); - } + // Return type. 
+ { + BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); + BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); + retype_raw_return_type(src, dst, CHECK_(empty)); } break; } @@ -273,7 +265,7 @@ MethodHandleWalker::walk(TRAPS) { assert(dest == arg_state->_type, ""); ArgToken arg = arg_state->_arg; ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty)); - assert(arg.index() == new_arg.index(), "should be the same index"); + assert(arg.token_type() >= tt_symbolic || arg.index() == new_arg.index(), "should be the same index"); debug_only(dest_klass = (klassOop)badOop); break; } @@ -332,7 +324,7 @@ MethodHandleWalker::walk(TRAPS) { ArgToken arglist[2]; arglist[0] = arg; // outgoing value arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); + arg = make_invoke(NULL, boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); change_argument(src, arg_slot, T_OBJECT, arg); break; } @@ -404,8 +396,54 @@ MethodHandleWalker::walk(TRAPS) { break; } - case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC - lose("unimplemented", CHECK_(empty)); + case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: + retain_original_args = true; // and fall through: + case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { + // call argument MH recursively + //{static int x; if (!x++) print_method_handle(chain().method_handle_oop()); --x;} + Handle recursive_mh(THREAD, chain().adapter_arg_oop()); + if (!java_lang_invoke_MethodHandle::is_instance(recursive_mh())) { + lose("recursive target not a MethodHandle", CHECK_(empty)); + } + Handle recursive_mtype(THREAD, java_lang_invoke_MethodHandle::type(recursive_mh())); + int argc = java_lang_invoke_MethodType::ptype_count(recursive_mtype()); + int coll_slots = java_lang_invoke_MethodHandle::vmslots(recursive_mh()); + BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(recursive_mtype())); + ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, 1 + argc + 1); // 1+: mh, +1: sentinel + arglist[0] = make_oop_constant(recursive_mh(), CHECK_(empty)); + if (arg_slot < 0 || coll_slots < 0 || arg_slot + coll_slots > _outgoing.length()) { + lose("bad fold/collect arg slot", CHECK_(empty)); + } + for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) { + SlotState* arg_state = slot_state(slot); + BasicType arg_type = arg_state->_type; + if (arg_type == T_VOID) continue; + ArgToken arg = _outgoing.at(slot)._arg; + if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); } + arglist[1+i] = arg; + if (!retain_original_args) + change_argument(arg_type, slot, T_VOID, ArgToken(tt_void)); + } + arglist[1+argc] = ArgToken(); // sentinel + oop invoker = java_lang_invoke_MethodTypeForm::vmlayout( + java_lang_invoke_MethodType::form(recursive_mtype()) ); + if (invoker == NULL || !invoker->is_method()) { + lose("bad vmlayout slot", CHECK_(empty)); + } + // FIXME: consider inlining the invokee at the bytecode level + ArgToken ret = make_invoke(methodOop(invoker), vmIntrinsics::_none, + Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty)); + DEBUG_ONLY(invoker = NULL); + if (rtype == T_OBJECT) { + klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) ); + if (rklass != SystemDictionary::Object_klass() && + 
!Klass::cast(rklass)->is_interface()) { + // preserve type safety + ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty)); + } + } + int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0); + change_argument(T_VOID, ret_slot, rtype, ret); break; } @@ -452,9 +490,18 @@ MethodHandleWalker::walk(TRAPS) { Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty)); // Spread out the array elements. - Bytecodes::Code aload_op = Bytecodes::_aaload; - if (element_type != T_OBJECT) { - lose("primitive array NYI", CHECK_(empty)); + Bytecodes::Code aload_op = Bytecodes::_nop; + switch (element_type) { + case T_INT: aload_op = Bytecodes::_iaload; break; + case T_LONG: aload_op = Bytecodes::_laload; break; + case T_FLOAT: aload_op = Bytecodes::_faload; break; + case T_DOUBLE: aload_op = Bytecodes::_daload; break; + case T_OBJECT: aload_op = Bytecodes::_aaload; break; + case T_BOOLEAN: // fall through: + case T_BYTE: aload_op = Bytecodes::_baload; break; + case T_CHAR: aload_op = Bytecodes::_caload; break; + case T_SHORT: aload_op = Bytecodes::_saload; break; + default: lose("primitive array NYI", CHECK_(empty)); } int ap = arg_slot; for (int i = 0; i < spread_length; i++) { @@ -467,11 +514,6 @@ MethodHandleWalker::walk(TRAPS) { break; } - case java_lang_invoke_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code - case java_lang_invoke_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code - lose("unimplemented", CHECK_(empty)); - break; - default: lose("bad adapter conversion", CHECK_(empty)); break; @@ -495,7 +537,7 @@ MethodHandleWalker::walk(TRAPS) { lose("bad bound value", CHECK_(empty)); } } - debug_only(arg_oop = badOop); + DEBUG_ONLY(arg_oop = badOop); change_argument(T_VOID, arg_slot, arg_type, arg); } @@ -538,11 +580,10 @@ void MethodHandleWalker::walk_incoming_state(TRAPS) { } for (int i = 0; i < nptypes; i++) { klassOop arg_type_klass = NULL; - BasicType arg_type = java_lang_Class::as_BasicType( - java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); + BasicType arg_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); int index = new_local_index(arg_type); ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK); - debug_only(arg_type_klass = (klassOop) NULL); + DEBUG_ONLY(arg_type_klass = (klassOop) NULL); _outgoing.at_put(argp, make_state(arg_type, arg)); if (type2size[arg_type] == 2) { // add the extra slot, so we can model the JVM stack @@ -552,8 +593,7 @@ void MethodHandleWalker::walk_incoming_state(TRAPS) { } // call make_parameter at the end of the list for the return type klassOop ret_type_klass = NULL; - BasicType ret_type = java_lang_Class::as_BasicType( - java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); + BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK); // ignore ret; client can catch it if needed } @@ -604,12 +644,55 @@ int MethodHandleWalker::argument_count_slow() { #endif +// ----------------------------------------------------------------------------- +// MethodHandleWalker::retype_raw_conversion +// +// Do the raw retype conversions for OP_RETYPE_RAW. 
+void MethodHandleWalker::retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS) { + if (src != dst) { + if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) { + if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) { + if (for_return) Untested("MHW return raw conversion"); // still untested + vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst); + if (iid == vmIntrinsics::_none) { + lose("no raw conversion method", CHECK); + } + ArgToken arglist[2]; + if (!for_return) { + // argument type conversion + ArgToken arg = _outgoing.at(slot)._arg; + assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity"); + arglist[0] = arg; // outgoing 'this' + arglist[1] = ArgToken(); // sentinel + arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + change_argument(src, slot, dst, arg); + } else { + // return type conversion + klassOop arg_klass = NULL; + arglist[0] = make_parameter(src, arg_klass, -1, CHECK); // return value + arglist[1] = ArgToken(); // sentinel + (void) make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + } + } else { + // Nothing to do. + } + } else if (src == T_OBJECT && is_java_primitive(dst)) { + // ref-to-prim: discard ref, push zero + lose("requested ref-to-prim conversion not expected", CHECK); + } else { + lose("requested raw conversion not allowed", CHECK); + } + } +} + + // ----------------------------------------------------------------------------- // MethodHandleCompiler -MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, bool is_invokedynamic, TRAPS) +MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool is_invokedynamic, TRAPS) : MethodHandleWalker(root, is_invokedynamic, THREAD), _callee(callee), + _invoke_count(invoke_count), _thread(THREAD), _bytecode(THREAD, 50), _constants(THREAD, 10), @@ -709,6 +792,7 @@ void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) { case Bytecodes::_astore_1: case Bytecodes::_astore_2: case Bytecodes::_astore_3: + case Bytecodes::_iand: case Bytecodes::_i2l: case Bytecodes::_i2f: case Bytecodes::_i2d: @@ -935,7 +1019,11 @@ MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Co break; default: - ShouldNotReachHere(); + if (op == Bytecodes::_illegal) + lose("no such primitive conversion", THREAD); + else + lose("bad primitive conversion op", THREAD); + return make_prim_constant(type, &zero_jvalue, THREAD); } return make_parameter(type, tk, index, THREAD); @@ -946,7 +1034,9 @@ MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Co // MethodHandleCompiler // -static jvalue zero_jvalue; +// Values used by the compiler. +jvalue MethodHandleCompiler::zero_jvalue = { 0 }; +jvalue MethodHandleCompiler::one_jvalue = { 1 }; // Emit bytecodes for the given invoke instruction. MethodHandleWalker::ArgToken @@ -954,18 +1044,18 @@ MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, MethodHandleWalker::ArgToken* argv, TRAPS) { + ArgToken zero; if (m == NULL) { // Get the intrinsic methodOop. 
m = vmIntrinsics::method_for(iid); if (m == NULL) { - ArgToken zero; lose(vmIntrinsics::name_at(iid), CHECK_(zero)); } } - klassOop klass = m->method_holder(); - Symbol* name = m->name(); - Symbol* signature = m->signature(); + klassOop klass = m->method_holder(); + Symbol* name = m->name(); + Symbol* signature = m->signature(); if (tailcall) { // Actually, in order to make these methods more recognizable, @@ -1031,7 +1121,6 @@ MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid, if (rbt != _rtype) { if (rbt == T_VOID) { // push a zero of the right sort - ArgToken zero; if (_rtype == T_OBJECT) { zero = make_oop_constant(NULL, CHECK_(zero)); } else { @@ -1041,9 +1130,27 @@ MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid, } else if (_rtype == T_VOID) { // We'll emit a _return with something on the stack. // It's OK to ignore what's on the stack. + } else if (rbt == T_INT && is_subword_type(_rtype)) { + // Convert value to match return type. + switch (_rtype) { + case T_BOOLEAN: { + // boolean is treated as a one-bit unsigned integer. + // Cf. API documentation: java/lang/invoke/MethodHandles.html#explicitCastArguments + ArgToken one = make_prim_constant(T_INT, &one_jvalue, CHECK_(zero)); + emit_load_constant(one); + emit_bc(Bytecodes::_iand); + break; + } + case T_BYTE: emit_bc(Bytecodes::_i2b); break; + case T_CHAR: emit_bc(Bytecodes::_i2c); break; + case T_SHORT: emit_bc(Bytecodes::_i2s); break; + default: ShouldNotReachHere(); + } + } else if (is_subword_type(rbt) && (is_subword_type(_rtype) || (_rtype == T_INT))) { + // The subword type was returned as an int and will be passed + // on as an int. } else { - tty->print_cr("*** rbt=%d != rtype=%d", rbt, _rtype); - assert(false, "IMPLEMENT ME"); + lose("unknown conversion", CHECK_(zero)); } } switch (_rtype) { @@ -1173,7 +1280,7 @@ constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const { methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const { - methodHandle nullHandle; + methodHandle empty; // Create a method that holds the generated bytecode. invokedynamic // has no receiver, normal MH calls do. int flags_bits; @@ -1182,13 +1289,16 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const { else flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC); - methodOop m_oop = oopFactory::new_method(bytecode_length(), - accessFlags_from(flags_bits), - 0, 0, 0, oopDesc::IsSafeConc, CHECK_(nullHandle)); - methodHandle m(THREAD, m_oop); - m_oop = NULL; // oop not GC safe + // Create a new method + methodHandle m; + { + methodOop m_oop = oopFactory::new_method(bytecode_length(), + accessFlags_from(flags_bits), + 0, 0, 0, oopDesc::IsSafeConc, CHECK_(empty)); + m = methodHandle(THREAD, m_oop); + } - constantPoolHandle cpool = get_constant_pool(CHECK_(nullHandle)); + constantPoolHandle cpool = get_constant_pool(CHECK_(empty)); m->set_constants(cpool()); m->set_name_index(_name_index); @@ -1203,16 +1313,34 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const { typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array()); m->set_exception_table(exception_handlers()); - // Set the carry bit of the invocation counter to force inlining of - // the adapter. - InvocationCounter* ic = m->invocation_counter(); - ic->set_carry_flag(); - // Rewrite the method and set up the constant pool cache. 
- objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle)); + objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(empty)); objArrayHandle methods(THREAD, m_array); methods->obj_at_put(0, m()); - Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(nullHandle)); // Use fake class. + Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty)); // Use fake class. + + // Set the invocation counter's count to the invoke count of the + // original call site. + InvocationCounter* ic = m->invocation_counter(); + ic->set(InvocationCounter::wait_for_compile, _invoke_count); + + // Create a new MDO + { + methodDataOop mdo = oopFactory::new_methodData(m, CHECK_(empty)); + assert(m->method_data() == NULL, "there should not be an MDO yet"); + m->set_method_data(mdo); + + // Iterate over all profile data and set the count of the counter + // data entries to the original call site counter. + for (ProfileData* profile_data = mdo->first_data(); + mdo->is_valid(profile_data); + profile_data = mdo->next_data(profile_data)) { + if (profile_data->is_CounterData()) { + CounterData* counter_data = profile_data->as_CounterData(); + counter_data->set_count(_invoke_count); + } + } + } #ifndef PRODUCT if (TraceMethodHandles) { @@ -1228,7 +1356,6 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const { #ifndef PRODUCT -#if 0 // MH printer for debugging. class MethodHandlePrinter : public MethodHandleWalker { @@ -1236,6 +1363,7 @@ private: outputStream* _out; bool _verbose; int _temp_num; + int _param_state; stringStream _strbuf; const char* strbuf() { const char* s = _strbuf.as_string(); @@ -1243,14 +1371,21 @@ private: return s; } ArgToken token(const char* str) { - return (ArgToken) str; + jvalue string_con; + string_con.j = (intptr_t) str; + return ArgToken(tt_symbolic, T_LONG, string_con); + } + const char* string(ArgToken token) { + return (const char*) (intptr_t) token.get_jlong(); } void start_params() { + _param_state <<= 1; _out->print("("); } void end_params() { if (_verbose) _out->print("\n"); _out->print(") => {"); + _param_state >>= 1; } void put_type_name(BasicType type, klassOop tk, outputStream* s) { const char* kname = NULL; @@ -1270,9 +1405,10 @@ private: public: MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS) - : MethodHandleWalker(root, THREAD), + : MethodHandleWalker(root, false, THREAD), _out(out), _verbose(verbose), + _param_state(0), _temp_num(0) { start_params(); @@ -1280,9 +1416,10 @@ public: virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { if (argnum < 0) { end_params(); - return NULL; + return token("return"); } - if (argnum == 0) { + if ((_param_state & 1) == 0) { + _param_state |= 1; _out->print(_verbose ? "\n " : ""); } else { _out->print(_verbose ? 
",\n " : ", "); @@ -1312,8 +1449,15 @@ public: java_lang_boxing_object::print(type, con, &_strbuf); return maybe_make_temp("constant", type, "k"); } - virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken src, TRAPS) { - _strbuf.print("%s(%s", Bytecodes::name(op), (const char*)src); + void print_bytecode_name(Bytecodes::Code op) { + if (Bytecodes::is_defined(op)) + _strbuf.print("%s", Bytecodes::name(op)); + else + _strbuf.print("bytecode_%d", (int) op); + } + virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) { + print_bytecode_name(op); + _strbuf.print("(%s", string(src)); if (tk != NULL) { _strbuf.print(", "); put_type_name(type, tk, &_strbuf); @@ -1321,8 +1465,8 @@ public: _strbuf.print(")"); return maybe_make_temp("convert", type, "v"); } - virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken base, ArgToken offset, TRAPS) { - _strbuf.print("%s(%s, %s", Bytecodes::name(op), (const char*)base, (const char*)offset); + virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) { + _strbuf.print("%s(%s, %s", Bytecodes::name(op), string(base), string(offset)); if (tk != NULL) { _strbuf.print(", "); put_type_name(type, tk, &_strbuf); @@ -1333,7 +1477,8 @@ public: virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) { - Symbol* name, sig; + Symbol* name; + Symbol* sig; if (m != NULL) { name = m->name(); sig = m->signature(); @@ -1343,7 +1488,7 @@ public: } _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string()); for (int i = 0; i < argc; i++) { - _strbuf.print("%s%s", (i > 0 ? ", " : ""), (const char*)argv[i]); + _strbuf.print("%s%s", (i > 0 ? ", " : ""), string(argv[i])); } _strbuf.print(")"); if (!tailcall) { @@ -1381,24 +1526,20 @@ public: if (HAS_PENDING_EXCEPTION) { oop ex = PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION; - out->print("\n*** "); - if (ex != Universe::virtual_machine_error_instance()) - ex->print_on(out); - else - out->print("lose: %s", printer.lose_message()); - out->print("\n}\n"); + out->print(" *** "); + if (printer.lose_message() != NULL) out->print("%s ", printer.lose_message()); + out->print("}"); } out->print("\n"); } }; -#endif // 0 extern "C" void print_method_handle(oop mh) { if (!mh->is_oop()) { - tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh); + tty->print_cr("*** not a method handle: "PTR_FORMAT, (intptr_t)mh); } else if (java_lang_invoke_MethodHandle::is_instance(mh)) { - //MethodHandlePrinter::print(mh); + MethodHandlePrinter::print(mh); } else { tty->print("*** not a method handle: "); mh->print(); diff --git a/hotspot/src/share/vm/prims/methodHandleWalk.hpp b/hotspot/src/share/vm/prims/methodHandleWalk.hpp index 0136d9e3241..df2fe278f8c 100644 --- a/hotspot/src/share/vm/prims/methodHandleWalk.hpp +++ b/hotspot/src/share/vm/prims/methodHandleWalk.hpp @@ -113,6 +113,7 @@ public: tt_parameter, tt_temporary, tt_constant, + tt_symbolic, tt_illegal }; @@ -164,6 +165,10 @@ private: bool _for_invokedynamic; int _local_index; + // This array is kept in an unusual order, indexed by low-level "slot number". + // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array. + // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1). 
+ // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID. GrowableArray _outgoing; // current outgoing parameter slots int _outgoing_argc; // # non-empty outgoing slots @@ -173,6 +178,11 @@ private: // Insert or delete a second empty slot as needed. void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg); + // Raw retype conversions for OP_RAW_RETYPE. + void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS); + void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); } + void retype_raw_return_type( BasicType src, BasicType dst, TRAPS) { retype_raw_conversion(src, dst, true, -1, CHECK); } + SlotState* slot_state(int slot) { if (slot < 0 || slot >= _outgoing.length()) return NULL; @@ -221,12 +231,12 @@ public: int max_locals() const { return _local_index; } // plug-in abstract interpretation steps: - virtual ArgToken make_parameter( BasicType type, klassOop tk, int argnum, TRAPS ) = 0; - virtual ArgToken make_prim_constant( BasicType type, jvalue* con, TRAPS ) = 0; - virtual ArgToken make_oop_constant( oop con, TRAPS ) = 0; - virtual ArgToken make_conversion( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS ) = 0; - virtual ArgToken make_fetch( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS ) = 0; - virtual ArgToken make_invoke( methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS ) = 0; + virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) = 0; + virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) = 0; + virtual ArgToken make_oop_constant(oop con, TRAPS) = 0; + virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0; + virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0; + virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; // For make_invoke, the methodOop can be NULL if the intrinsic ID // is something other than vmIntrinsics::_none. @@ -247,11 +257,16 @@ public: class MethodHandleCompiler : public MethodHandleWalker { private: methodHandle _callee; + int _invoke_count; // count the original call site has been executed KlassHandle _rklass; // Return type for casting. BasicType _rtype; KlassHandle _target_klass; Thread* _thread; + // Values used by the compiler. + static jvalue zero_jvalue; + static jvalue one_jvalue; + // Fake constant pool entry. class ConstantValue { private: @@ -416,7 +431,7 @@ private: methodHandle get_method_oop(TRAPS) const; public: - MethodHandleCompiler(Handle root, methodHandle call_method, bool for_invokedynamic, TRAPS); + MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool for_invokedynamic, TRAPS); // Compile the given MH chain into bytecode. 
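The comment block above describes the unusual ordering of the _outgoing array: index 0 is top-of-stack, deeper slots have higher indices, and a two-slot value (long/double) is followed by a void filler at the next-higher index. A small standalone model of that layout, assuming a simplified type enum (hypothetical names, not the real GrowableArray code):

#include <vector>
#include <cstdio>

enum SketchBT { SB_INT, SB_LONG, SB_DOUBLE, SB_OBJECT, SB_VOID };

static int slots_for(SketchBT t) { return (t == SB_LONG || t == SB_DOUBLE) ? 2 : 1; }

// Push a value onto the simulated outgoing list; at(0) is always TOS.
static void push_outgoing(std::vector<SketchBT>& outgoing, SketchBT t) {
  if (slots_for(t) == 2)
    outgoing.insert(outgoing.begin(), SB_VOID);   // filler for the second slot
  outgoing.insert(outgoing.begin(), t);
}

int main() {
  std::vector<SketchBT> outgoing;
  push_outgoing(outgoing, SB_OBJECT);   // receiver ends up deepest
  push_outgoing(outgoing, SB_LONG);
  push_outgoing(outgoing, SB_INT);
  // expected layout, TOS first: INT LONG VOID OBJECT
  for (SketchBT t : outgoing) printf("%d ", (int) t);
  printf("\n");
  return 0;
}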
methodHandle compile(TRAPS); diff --git a/hotspot/src/share/vm/prims/methodHandles.cpp b/hotspot/src/share/vm/prims/methodHandles.cpp index 12a483f23b5..293a2771bb4 100644 --- a/hotspot/src/share/vm/prims/methodHandles.cpp +++ b/hotspot/src/share/vm/prims/methodHandles.cpp @@ -66,8 +66,8 @@ const char* MethodHandles::_entry_names[_EK_LIMIT+1] = { "adapter_drop_args", "adapter_collect_args", "adapter_spread_args", - "adapter_flyby", - "adapter_ricochet", + "adapter_fold_args", + "adapter_unused_13", // optimized adapter types: "adapter_swap_args/1", @@ -83,9 +83,76 @@ const char* MethodHandles::_entry_names[_EK_LIMIT+1] = { "adapter_prim_to_prim/f2d", "adapter_ref_to_prim/unboxi", "adapter_ref_to_prim/unboxl", - "adapter_spread_args/0", - "adapter_spread_args/1", - "adapter_spread_args/more", + + // return value handlers for collect/filter/fold adapters: + "return/ref", + "return/int", + "return/long", + "return/float", + "return/double", + "return/void", + "return/S0/ref", + "return/S1/ref", + "return/S2/ref", + "return/S3/ref", + "return/S4/ref", + "return/S5/ref", + "return/any", + + // spreading (array length cases 0, 1, ...) + "adapter_spread/0", + "adapter_spread/1/ref", + "adapter_spread/2/ref", + "adapter_spread/3/ref", + "adapter_spread/4/ref", + "adapter_spread/5/ref", + "adapter_spread/ref", + "adapter_spread/byte", + "adapter_spread/char", + "adapter_spread/short", + "adapter_spread/int", + "adapter_spread/long", + "adapter_spread/float", + "adapter_spread/double", + + // blocking filter/collect conversions: + "adapter_collect/ref", + "adapter_collect/int", + "adapter_collect/long", + "adapter_collect/float", + "adapter_collect/double", + "adapter_collect/void", + "adapter_collect/0/ref", + "adapter_collect/1/ref", + "adapter_collect/2/ref", + "adapter_collect/3/ref", + "adapter_collect/4/ref", + "adapter_collect/5/ref", + "adapter_filter/S0/ref", + "adapter_filter/S1/ref", + "adapter_filter/S2/ref", + "adapter_filter/S3/ref", + "adapter_filter/S4/ref", + "adapter_filter/S5/ref", + "adapter_collect/2/S0/ref", + "adapter_collect/2/S1/ref", + "adapter_collect/2/S2/ref", + "adapter_collect/2/S3/ref", + "adapter_collect/2/S4/ref", + "adapter_collect/2/S5/ref", + + // blocking fold conversions: + "adapter_fold/ref", + "adapter_fold/int", + "adapter_fold/long", + "adapter_fold/float", + "adapter_fold/double", + "adapter_fold/void", + "adapter_fold/1/ref", + "adapter_fold/2/ref", + "adapter_fold/3/ref", + "adapter_fold/4/ref", + "adapter_fold/5/ref", NULL }; @@ -96,13 +163,23 @@ int MethodHandles::_adapter_code_size = StubRoutines::meth jobject MethodHandles::_raise_exception_method; +address MethodHandles::_adapter_return_handlers[CONV_TYPE_MASK+1]; + #ifdef ASSERT bool MethodHandles::spot_check_entry_names() { assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), ""); assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), ""); assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), ""); - assert(!strcmp(entry_name(_adapter_ricochet), "adapter_ricochet"), ""); + assert(!strcmp(entry_name(_adapter_fold_args), "adapter_fold_args"), ""); assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), ""); + assert(!strcmp(entry_name(_adapter_opt_spread_char), "adapter_spread/char"), ""); + assert(!strcmp(entry_name(_adapter_opt_spread_double), "adapter_spread/double"), ""); + assert(!strcmp(entry_name(_adapter_opt_collect_int), "adapter_collect/int"), ""); + assert(!strcmp(entry_name(_adapter_opt_collect_0_ref), "adapter_collect/0/ref"), ""); + 
assert(!strcmp(entry_name(_adapter_opt_collect_2_S3_ref), "adapter_collect/2/S3/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_filter_S5_ref), "adapter_filter/S5/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_fold_3_ref), "adapter_fold/3/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_fold_void), "adapter_fold/void"), ""); return true; } #endif @@ -112,6 +189,9 @@ bool MethodHandles::spot_check_entry_names() { // MethodHandles::generate_adapters // void MethodHandles::generate_adapters() { +#ifdef TARGET_ARCH_NYI_6939861 + if (FLAG_IS_DEFAULT(UseRicochetFrames)) UseRicochetFrames = false; +#endif if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL) return; assert(_adapter_code == NULL, "generate only once"); @@ -126,7 +206,6 @@ void MethodHandles::generate_adapters() { g.generate(); } - //------------------------------------------------------------------------------ // MethodHandlesAdapterGenerator::generate // @@ -135,12 +214,62 @@ void MethodHandlesAdapterGenerator::generate() { for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; ek < MethodHandles::_EK_LIMIT; ek = MethodHandles::EntryKind(1 + (int)ek)) { - StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); - MethodHandles::generate_method_handle_stub(_masm, ek); + if (MethodHandles::ek_supported(ek)) { + StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); + MethodHandles::generate_method_handle_stub(_masm, ek); + } } } +#ifdef TARGET_ARCH_NYI_6939861 +// these defs belong in methodHandles_.cpp +frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { + ShouldNotCallThis(); + return fr; +} +void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) { + ShouldNotCallThis(); +} +#endif //TARGET_ARCH_NYI_6939861 + + +//------------------------------------------------------------------------------ +// MethodHandles::ek_supported +// +bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) { + MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek); + switch (ek_orig) { + case _adapter_unused_13: + return false; // not defined yet + case _adapter_prim_to_ref: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF); + case _adapter_collect_args: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS); + case _adapter_fold_args: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS); + case _adapter_opt_return_any: + return UseRicochetFrames; +#ifdef TARGET_ARCH_NYI_6939861 + // ports before 6939861 supported only three kinds of spread ops + case _adapter_spread_args: + // restrict spreads to three kinds: + switch (ek) { + case _adapter_opt_spread_0: + case _adapter_opt_spread_1: + case _adapter_opt_spread_more: + break; + default: + return false; + break; + } + break; +#endif //TARGET_ARCH_NYI_6939861 + } + return true; +} + + void MethodHandles::set_enabled(bool z) { if (_enabled != z) { guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic"); @@ -970,6 +1099,14 @@ static oop object_java_mirror() { return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); } +bool MethodHandles::is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst) { + if (src == T_FLOAT) return dst == T_INT; + if (src == T_INT) return dst == T_FLOAT; + if (src == T_DOUBLE) return dst == T_LONG; + if 
(src == T_LONG) return dst == T_DOUBLE; + return false; +} + bool MethodHandles::same_basic_type_for_arguments(BasicType src, BasicType dst, bool raw, @@ -996,10 +1133,8 @@ bool MethodHandles::same_basic_type_for_arguments(BasicType src, return true; // remaining case: byte fits in short } // allow float/fixed reinterpretation casts - if (src == T_FLOAT) return dst == T_INT; - if (src == T_INT) return dst == T_FLOAT; - if (src == T_DOUBLE) return dst == T_LONG; - if (src == T_LONG) return dst == T_DOUBLE; + if (is_float_fixed_reinterpretation_cast(src, dst)) + return true; return false; } @@ -1270,7 +1405,7 @@ const char* MethodHandles::check_argument_type_change(BasicType src_type, int argnum, bool raw) { const char* err = NULL; - bool for_return = (argnum < 0); + const bool for_return = (argnum < 0); // just in case: if (src_type == T_ARRAY) src_type = T_OBJECT; @@ -1279,17 +1414,17 @@ const char* MethodHandles::check_argument_type_change(BasicType src_type, // Produce some nice messages if VerifyMethodHandles is turned on: if (!same_basic_type_for_arguments(src_type, dst_type, raw, for_return)) { if (src_type == T_OBJECT) { - if (raw && dst_type == T_INT && is_always_null_type(src_klass)) - return NULL; // OK to convert a null pointer to a garbage int - err = ((argnum >= 0) + if (raw && is_java_primitive(dst_type)) + return NULL; // ref-to-prim discards ref and returns zero + err = (!for_return ? "type mismatch: passing a %s for method argument #%d, which expects primitive %s" : "type mismatch: returning a %s, but caller expects primitive %s"); } else if (dst_type == T_OBJECT) { - err = ((argnum >= 0) + err = (!for_return ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s" : "type mismatch: returning a primitive %s, but caller expects %s"); } else { - err = ((argnum >= 0) + err = (!for_return ? "type mismatch: passing a %s for method argument #%d, which expects %s" : "type mismatch: returning a %s, but caller expects %s"); } @@ -1298,11 +1433,11 @@ const char* MethodHandles::check_argument_type_change(BasicType src_type, if (!class_cast_needed(dst_klass, src_klass)) { if (raw) return NULL; // reverse cast is OK; the MH target is trusted to enforce it - err = ((argnum >= 0) + err = (!for_return ? "cast required: passing a %s for method argument #%d, which expects %s" : "cast required: returning a %s, but caller expects %s"); } else { - err = ((argnum >= 0) + err = (!for_return ? "reference mismatch: passing a %s for method argument #%d, which expects %s" : "reference mismatch: returning a %s, but caller expects %s"); } @@ -1323,7 +1458,7 @@ const char* MethodHandles::check_argument_type_change(BasicType src_type, size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 
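The new is_float_fixed_reinterpretation_cast helper above accepts exactly the same-width float/fixed pairings (float with int, double with long) and nothing else; same_basic_type_for_arguments now delegates to it. A standalone sketch of that predicate, with an abbreviated enum standing in for BasicType (not HotSpot code):

#include <cstdio>

enum SketchBT { SB_INT, SB_LONG, SB_FLOAT, SB_DOUBLE, SB_OBJECT };

static bool is_float_fixed_reinterpretation_cast(SketchBT src, SketchBT dst) {
  if (src == SB_FLOAT)  return dst == SB_INT;
  if (src == SB_INT)    return dst == SB_FLOAT;
  if (src == SB_DOUBLE) return dst == SB_LONG;
  if (src == SB_LONG)   return dst == SB_DOUBLE;
  return false;
}

int main() {
  printf("%d %d %d\n",
         is_float_fixed_reinterpretation_cast(SB_FLOAT, SB_INT),    // 1
         is_float_fixed_reinterpretation_cast(SB_LONG, SB_DOUBLE),  // 1
         is_float_fixed_reinterpretation_cast(SB_FLOAT, SB_LONG));  // 0
  return 0;
}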
1 : 11); char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); - if (argnum >= 0) { + if (!for_return) { assert(strstr(err, "%d") != NULL, ""); jio_snprintf(msg, msglen, err, src_name, argnum, dst_name); } else { @@ -1564,6 +1699,8 @@ void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh, if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } java_lang_invoke_MethodHandle::init_vmslots(mh()); + int vmargslot = m->size_of_parameters() - 1; + assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, ""); if (VerifyMethodHandles) { verify_BoundMethodHandle_with_receiver(mh, m, CHECK); @@ -1642,14 +1779,9 @@ void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnu DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); if (direct_to_method) { assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots"); - assert(slots_pushed <= MethodHandlePushLimit, ""); } else { int target_pushes = decode_MethodHandle_stack_pushes(target()); assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct"); - // do not blow the stack; use a Java-based adapter if this limit is exceeded - // FIXME - // if (slots_pushed + target_pushes > MethodHandlePushLimit) - // err = "too many bound parameters"; } } @@ -1672,10 +1804,11 @@ void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum, } java_lang_invoke_MethodHandle::init_vmslots(mh()); + int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh()); if (VerifyMethodHandles) { int insert_after = argnum - 1; - verify_vmargslot(mh, insert_after, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); + verify_vmargslot(mh, insert_after, argslot, CHECK); verify_vmslots(mh, CHECK); } @@ -1769,6 +1902,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { Handle target(THREAD, java_lang_invoke_AdapterMethodHandle::vmtarget(mh())); Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); + Handle arg_mtype; const char* err = NULL; @@ -1777,25 +1911,29 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { switch (ek) { case _adapter_check_cast: // target type of cast case _adapter_ref_to_prim: // wrapper type from which to unbox - case _adapter_prim_to_ref: // wrapper type to box into - case _adapter_collect_args: // array type to collect into case _adapter_spread_args: // array type to spread from if (!java_lang_Class::is_instance(argument()) || java_lang_Class::is_primitive(argument())) { err = "adapter requires argument of type java.lang.Class"; break; } - if (ek == _adapter_collect_args || - ek == _adapter_spread_args) { + if (ek == _adapter_spread_args) { // Make sure it is a suitable collection type. (Array, for now.) 
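The error-message formatting above budgets the buffer as the template length plus both type names plus up to 11 characters for a large argument number, then fills the %s/%d holes. A standalone sketch of that sizing and formatting, using snprintf/malloc as stand-ins for jio_snprintf and the resource array, with example strings that are purely illustrative:

#include <cstdio>
#include <cstring>
#include <cstdlib>

int main() {
  const char* err = "type mismatch: passing a %s for method argument #%d, which expects %s";
  const char* src_name = "java.lang.String";   // hypothetical example values
  const char* dst_name = "int";
  int argnum = 2;
  size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name)
                + (argnum < 10 ? 1 : 11);      // room for the argument number
  char* msg = (char*) malloc(msglen + 1);
  snprintf(msg, msglen + 1, err, src_name, argnum, dst_name);
  puts(msg);
  free(msg);
  return 0;
}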
Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument())); - if (!ak->oop_is_objArray()) { - { err = "adapter requires argument of type java.lang.Class"; break; } - } + if (!ak->oop_is_array()) + { err = "spread adapter requires argument representing an array class"; break; } + BasicType et = arrayKlass::cast(ak->as_klassOop())->element_type(); + if (et != dest && stack_move <= 0) + { err = "spread adapter requires array class argument of correct type"; break; } } break; - case _adapter_flyby: - case _adapter_ricochet: + case _adapter_prim_to_ref: // boxer MH to use + case _adapter_collect_args: // method handle which collects the args + case _adapter_fold_args: // method handle which collects the args + if (!UseRicochetFrames) { + { err = "box/collect/fold operators are not supported"; break; } + } if (!java_lang_invoke_MethodHandle::is_instance(argument())) { err = "MethodHandle adapter argument required"; break; } + arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument())); break; default: if (argument.not_null()) @@ -1806,6 +1944,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { if (err == NULL) { // Check that the src/dest types are supplied if needed. + // Also check relevant parameter or return types. switch (ek) { case _adapter_check_cast: if (src != T_OBJECT || dest != T_OBJECT) { @@ -1828,8 +1967,7 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { } break; case _adapter_prim_to_ref: - if (!is_java_primitive(src) || dest != T_OBJECT - || argument() != Klass::cast(SystemDictionary::box_klass(src))->java_mirror()) { + if (!is_java_primitive(src) || dest != T_OBJECT) { err = "adapter requires primitive src conversion subfield"; break; } break; @@ -1840,14 +1978,12 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { err = "adapter requires src/dest conversion subfields for swap"; break; } int swap_size = type2size[src]; - oop src_mtype = java_lang_invoke_AdapterMethodHandle::type(mh()); - oop dest_mtype = java_lang_invoke_AdapterMethodHandle::type(target()); - int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(target()); + int slot_limit = java_lang_invoke_MethodHandle::vmslots(target()); int src_slot = argslot; int dest_slot = vminfo; bool rotate_up = (src_slot > dest_slot); // upward rotation int src_arg = argnum; - int dest_arg = argument_slot_to_argnum(dest_mtype, dest_slot); + int dest_arg = argument_slot_to_argnum(dst_mtype(), dest_slot); verify_vmargslot(mh, dest_arg, dest_slot, CHECK); if (!(dest_slot >= src_slot + swap_size) && !(src_slot >= dest_slot + swap_size)) { @@ -1855,8 +1991,8 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) { err = "source of swap must be deeper in stack"; } else if (ek == _adapter_swap_args) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, dest_arg), - java_lang_invoke_MethodType::ptype(dest_mtype, src_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg), + java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg), dest_arg); } else if (ek == _adapter_rot_args) { if (rotate_up) { @@ -1864,8 +2000,8 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot] // that is: [src_arg+1..dest_arg] --> [src_arg..dest_arg-1] for (int i = src_arg+1; i <= dest_arg && 
err == NULL; i++) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), - java_lang_invoke_MethodType::ptype(dest_mtype, i-1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), + java_lang_invoke_MethodType::ptype(dst_mtype(), i-1), i); } } else { // rotate down @@ -1873,28 +2009,54 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss] // that is: [dest_arg..src_arg-1] --> [dst_arg+1..src_arg] for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), - java_lang_invoke_MethodType::ptype(dest_mtype, i+1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), + java_lang_invoke_MethodType::ptype(dst_mtype(), i+1), i); } } } if (err == NULL) - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, src_arg), - java_lang_invoke_MethodType::ptype(dest_mtype, dest_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), + java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), src_arg); } break; - case _adapter_collect_args: case _adapter_spread_args: + case _adapter_collect_args: + case _adapter_fold_args: { - BasicType coll_type = (ek == _adapter_collect_args) ? dest : src; - BasicType elem_type = (ek == _adapter_collect_args) ? src : dest; - if (coll_type != T_OBJECT || elem_type != T_OBJECT) { - err = "adapter requires src/dest subfields"; break; - // later: - // - consider making coll be a primitive array - // - consider making coll be a heterogeneous collection + bool is_spread = (ek == _adapter_spread_args); + bool is_fold = (ek == _adapter_fold_args); + BasicType coll_type = is_spread ? src : dest; + BasicType elem_type = is_spread ? dest : src; + // coll_type is type of args in collected form (or T_VOID if none) + // elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous) + if (coll_type == 0 || elem_type == 0) { + err = "adapter requires src/dest subfields for spread or collect"; break; + } + if (is_spread && coll_type != T_OBJECT) { + err = "spread adapter requires object type for argument bundle"; break; + } + Handle spread_mtype = (is_spread ? dst_mtype : src_mtype); + int spread_slot = argslot; + int spread_arg = argnum; + int slots_pushed = stack_move / stack_move_unit(); + int coll_slot_count = type2size[coll_type]; + int spread_slot_count = (is_spread ? slots_pushed : -slots_pushed) + coll_slot_count; + if (is_fold) spread_slot_count = argument_slot_count(arg_mtype()); + if (!is_spread) { + int init_slots = argument_slot_count(src_mtype()); + int coll_slots = argument_slot_count(arg_mtype()); + if (spread_slot_count > init_slots || + spread_slot_count != coll_slots) { + err = "collect adapter has inconsistent arg counts"; break; + } + int next_slots = argument_slot_count(dst_mtype()); + int unchanged_slots_in = (init_slots - spread_slot_count); + int unchanged_slots_out = (next_slots - coll_slot_count - (is_fold ? 
spread_slot_count : 0)); + if (unchanged_slots_in != unchanged_slots_out) { + err = "collect adapter continuation has inconsistent arg counts"; break; + } } } break; @@ -1929,8 +2091,9 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { } break; case _adapter_collect_args: - if (slots_pushed > 1) { - err = "adapter requires conversion subfield slots_pushed <= 1"; + case _adapter_fold_args: + if (slots_pushed > 2) { + err = "adapter requires conversion subfield slots_pushed <= 2"; } break; case _adapter_spread_args: @@ -1950,32 +2113,36 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { } if (err == NULL) { - // Make sure this adapter does not push too deeply. + // Make sure this adapter's stack pushing is accurately recorded. int slots_pushed = stack_move / stack_move_unit(); int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh()); int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target()); + int target_pushes = decode_MethodHandle_stack_pushes(target()); if (slots_pushed != (target_vmslots - this_vmslots)) { err = "stack_move inconsistent with previous and current MethodType vmslots"; - } else if (slots_pushed > 0) { - // verify stack_move against MethodHandlePushLimit - int target_pushes = decode_MethodHandle_stack_pushes(target()); - // do not blow the stack; use a Java-based adapter if this limit is exceeded - if (slots_pushed + target_pushes > MethodHandlePushLimit) { - err = "adapter pushes too many parameters"; + } else { + int this_pushes = decode_MethodHandle_stack_pushes(mh()); + if (slots_pushed + target_pushes != this_pushes) { + if (this_pushes == 0) + err = "adapter push count not initialized"; + else + err = "adapter push count is wrong"; } } // While we're at it, check that the stack motion decoder works: - DEBUG_ONLY(int target_pushes = decode_MethodHandle_stack_pushes(target())); DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); assert(this_pushes == slots_pushed + target_pushes, "AMH stack motion must be correct"); } if (err == NULL && vminfo != 0) { switch (ek) { - case _adapter_swap_args: - case _adapter_rot_args: - break; // OK + case _adapter_swap_args: + case _adapter_rot_args: + case _adapter_prim_to_ref: + case _adapter_collect_args: + case _adapter_fold_args: + break; // OK default: err = "vminfo subfield is reserved to the JVM"; } @@ -2019,13 +2186,15 @@ void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { } void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { - int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); - jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); - jint conv_op = adapter_conversion_op(conversion); + Handle argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); + int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); + jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); + jint conv_op = adapter_conversion_op(conversion); // adjust the adapter code to the internal EntryKind enumeration: EntryKind ek_orig = adapter_entry_kind(conv_op); EntryKind ek_opt = ek_orig; // may be optimized + EntryKind ek_try; // temp // Finalize the vmtarget field (Java initialized it to null). 
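The verification above also checks the push accounting: the adapter's decoded net stack motion must equal its own stack_move (in slots) plus whatever its target chain already pushes. A standalone sketch of that invariant, with a toy chain summing stand-in for decode_MethodHandle_stack_pushes (hypothetical names, not HotSpot code):

#include <cstdio>

struct SketchMH {                 // stand-in for one method handle link
  int slots_pushed;               // local stack motion of this link
  const SketchMH* target;         // next handle in the chain, or null
};

// Analogue of decode_MethodHandle_stack_pushes: sum the chain's motion.
static int chain_pushes(const SketchMH* mh) {
  int total = 0;
  for (; mh != nullptr; mh = mh->target) total += mh->slots_pushed;
  return total;
}

int main() {
  SketchMH dmh  = {  0, nullptr };   // direct target pushes nothing
  SketchMH drop = { -1, &dmh };      // drops one argument slot
  SketchMH ins  = {  2, &drop };     // inserts a two-slot argument
  // invariant checked above: this_pushes == slots_pushed + target_pushes
  printf("%d == %d + %d\n", chain_pushes(&ins),
         ins.slots_pushed, chain_pushes(&drop));
  return 0;
}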
if (!java_lang_invoke_MethodHandle::is_instance(target())) { @@ -2034,17 +2203,23 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu } java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target()); - if (VerifyMethodHandles) { - verify_AdapterMethodHandle(mh, argnum, CHECK); - } - int stack_move = adapter_conversion_stack_move(conversion); BasicType src = adapter_conversion_src_type(conversion); BasicType dest = adapter_conversion_dest_type(conversion); int vminfo = adapter_conversion_vminfo(conversion); // should be zero + int slots_pushed = stack_move / stack_move_unit(); + + if (VerifyMethodHandles) { + verify_AdapterMethodHandle(mh, argnum, CHECK); + } + const char* err = NULL; + if (!conv_op_supported(conv_op)) { + err = "adapter not yet implemented in the JVM"; + } + // Now it's time to finish the case analysis and pick a MethodHandleEntry. switch (ek_orig) { case _adapter_retype_only: @@ -2073,20 +2248,20 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu } else if (src == T_DOUBLE && dest == T_FLOAT) { ek_opt = _adapter_opt_d2f; } else { - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block } break; case 1 *4+ 2: - if (src == T_INT && dest == T_LONG) { + if ((src == T_INT || is_subword_type(src)) && dest == T_LONG) { ek_opt = _adapter_opt_i2l; } else if (src == T_FLOAT && dest == T_DOUBLE) { ek_opt = _adapter_opt_f2d; } else { - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block } break; default: - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block break; } } @@ -2103,14 +2278,54 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu ek_opt = _adapter_opt_unboxl; break; default: - assert(false, ""); + goto throw_not_impl; break; } } break; case _adapter_prim_to_ref: - goto throw_not_impl; // allocates, hence could block + { + assert(UseRicochetFrames, "else don't come here"); + // vminfo will be the location to insert the return value + vminfo = argslot; + ek_opt = _adapter_opt_collect_ref; + ensure_vmlayout_field(target, CHECK); + // for MethodHandleWalk: + if (java_lang_invoke_AdapterMethodHandle::is_instance(argument())) + ensure_vmlayout_field(argument, CHECK); + if (!OptimizeMethodHandles) break; + switch (type2size[src]) { + case 1: + ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == 1 && + ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); + ek_opt = ek_try; + break; + } + // else downgrade to variable slot: + ek_opt = _adapter_opt_collect_1_ref; + break; + case 2: + ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == 2 && + ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); + ek_opt = ek_try; + break; + } + // else downgrade to variable slot: + ek_opt = _adapter_opt_collect_2_ref; + break; + default: + goto throw_not_impl; + break; + } + } + break; case _adapter_swap_args: case _adapter_rot_args: @@ -2130,35 +2345,184 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu rotate > 0 ? 
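The prim-to-ref case above tries a fixed-slot specialization by enum arithmetic (base entry kind plus argslot), keeps it only if it decodes back to the requested slot, and otherwise downgrades to the variable-slot entry. A simplified standalone sketch of that selection pattern, with a made-up, compressed entry list (not the real EntryKind enum):

#include <cstdio>

enum SketchEK {                        // hypothetical entry kinds
  EK_filter_S0, EK_filter_S1, EK_filter_S2, EK_filter_S3,
  EK_collect_1_var                     // variable-slot fallback
};

static int fixed_slot_of(SketchEK ek) {
  return (ek <= EK_filter_S3) ? (int)ek - (int)EK_filter_S0 : -1;
}

static SketchEK pick_filter_entry(int argslot) {
  int ek_try = (int)EK_filter_S0 + argslot;          // cf. _adapter_opt_filter_S0_ref + argslot
  if (ek_try <= (int)EK_filter_S3 && fixed_slot_of((SketchEK)ek_try) == argslot)
    return (SketchEK)ek_try;                         // fixed specialization exists
  return EK_collect_1_var;                           // downgrade to variable slot
}

int main() {
  printf("%d %d\n", (int)pick_filter_entry(2), (int)pick_filter_entry(7));  // 2 4
  return 0;
}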
_adapter_opt_rot_2_up : _adapter_opt_rot_2_down); break; default: - assert(false, ""); + goto throw_not_impl; break; } } break; - case _adapter_collect_args: - goto throw_not_impl; // allocates, hence could block - case _adapter_spread_args: { - // vminfo will be the required length of the array - int slots_pushed = stack_move / stack_move_unit(); - int array_size = slots_pushed + 1; - assert(array_size >= 0, ""); - vminfo = array_size; - switch (array_size) { - case 0: ek_opt = _adapter_opt_spread_0; break; - case 1: ek_opt = _adapter_opt_spread_1; break; - default: ek_opt = _adapter_opt_spread_more; break; +#ifdef TARGET_ARCH_NYI_6939861 + // ports before 6939861 supported only three kinds of spread ops + if (!UseRicochetFrames) { + int array_size = slots_pushed + 1; + assert(array_size >= 0, ""); + vminfo = array_size; + switch (array_size) { + case 0: ek_opt = _adapter_opt_spread_0; break; + case 1: ek_opt = _adapter_opt_spread_1; break; + default: ek_opt = _adapter_opt_spread_more; break; + } + break; } - if ((vminfo & CONV_VMINFO_MASK) != vminfo) - goto throw_not_impl; // overflow +#endif //TARGET_ARCH_NYI_6939861 + // vminfo will be the required length of the array + int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 2 : 1); + vminfo = array_size; + // general case + switch (dest) { + case T_BOOLEAN : // fall through to T_BYTE: + case T_BYTE : ek_opt = _adapter_opt_spread_byte; break; + case T_CHAR : ek_opt = _adapter_opt_spread_char; break; + case T_SHORT : ek_opt = _adapter_opt_spread_short; break; + case T_INT : ek_opt = _adapter_opt_spread_int; break; + case T_LONG : ek_opt = _adapter_opt_spread_long; break; + case T_FLOAT : ek_opt = _adapter_opt_spread_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_spread_double; break; + case T_OBJECT : ek_opt = _adapter_opt_spread_ref; break; + case T_VOID : if (array_size != 0) goto throw_not_impl; + ek_opt = _adapter_opt_spread_ref; break; + default : goto throw_not_impl; + } + assert(array_size == 0 || // it doesn't matter what the spreader is + (ek_adapter_opt_spread_count(ek_opt) == -1 && + (ek_adapter_opt_spread_type(ek_opt) == dest || + (ek_adapter_opt_spread_type(ek_opt) == T_BYTE && dest == T_BOOLEAN))), + err_msg("dest=%d ek_opt=%d", dest, ek_opt)); + + if (array_size <= 0) { + // since the general case does not handle length 0, this case is required: + ek_opt = _adapter_opt_spread_0; + break; + } + if (dest == T_OBJECT) { + ek_try = EntryKind(_adapter_opt_spread_1_ref - 1 + array_size); + if (ek_try < _adapter_opt_spread_LAST && + ek_adapter_opt_spread_count(ek_try) == array_size) { + assert(ek_adapter_opt_spread_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + break; } break; - case _adapter_flyby: - case _adapter_ricochet: - goto throw_not_impl; // runs Java code, hence could block + case _adapter_collect_args: + { + assert(UseRicochetFrames, "else don't come here"); + int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); + // vminfo will be the location to insert the return value + vminfo = argslot; + ensure_vmlayout_field(target, CHECK); + ensure_vmlayout_field(argument, CHECK); + + // general case: + switch (dest) { + default : if (!is_subword_type(dest)) goto throw_not_impl; + // else fall through: + case T_INT : ek_opt = _adapter_opt_collect_int; break; + case T_LONG : ek_opt = _adapter_opt_collect_long; break; + case T_FLOAT : ek_opt = _adapter_opt_collect_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_collect_double; break; + case T_OBJECT : ek_opt = 
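The spread handling above recovers the required array length (stored in vminfo) from the adapter's net stack push, counting two stack slots per long/double element. A standalone sketch of that sizing rule (hypothetical helper name, not HotSpot code):

#include <cstdio>

// Mirrors array_size = (slots_pushed + 1) / (wide element ? 2 : 1).
static int spread_array_size(int slots_pushed, int elem_slots /* 1 or 2 */) {
  return (slots_pushed + 1) / (elem_slots == 2 ? 2 : 1);
}

int main() {
  // Spreading Object[3]: 3 element slots replace the 1 array slot,
  // so slots_pushed = 2 and array_size = (2 + 1) / 1 = 3.
  printf("%d\n", spread_array_size(2, 1));   // 3
  // Spreading long[2]: 4 element slots replace the 1 array slot,
  // so slots_pushed = 3 and array_size = (3 + 1) / 2 = 2.
  printf("%d\n", spread_array_size(3, 2));   // 2
  return 0;
}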
_adapter_opt_collect_ref; break; + case T_VOID : ek_opt = _adapter_opt_collect_void; break; + } + assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && + ek_adapter_opt_collect_count(ek_opt) == -1 && + (ek_adapter_opt_collect_type(ek_opt) == dest || + ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), + ""); + + if (dest == T_OBJECT && elem_slots == 1 && OptimizeMethodHandles) { + // filter operation on a ref + ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + ek_opt = _adapter_opt_collect_1_ref; + break; + } + + if (dest == T_OBJECT && elem_slots == 2 && OptimizeMethodHandles) { + // filter of two arguments + ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + ek_opt = _adapter_opt_collect_2_ref; + break; + } + + if (dest == T_OBJECT && OptimizeMethodHandles) { + // try to use a fixed length adapter + ek_try = EntryKind(_adapter_opt_collect_0_ref + elem_slots); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_count(ek_try) == elem_slots) { + assert(ek_adapter_opt_collect_slot(ek_try) == -1 && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + + break; + } + + case _adapter_fold_args: + { + assert(UseRicochetFrames, "else don't come here"); + int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); + // vminfo will be the location to insert the return value + vminfo = argslot + elem_slots; + ensure_vmlayout_field(target, CHECK); + ensure_vmlayout_field(argument, CHECK); + + switch (dest) { + default : if (!is_subword_type(dest)) goto throw_not_impl; + // else fall through: + case T_INT : ek_opt = _adapter_opt_fold_int; break; + case T_LONG : ek_opt = _adapter_opt_fold_long; break; + case T_FLOAT : ek_opt = _adapter_opt_fold_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_fold_double; break; + case T_OBJECT : ek_opt = _adapter_opt_fold_ref; break; + case T_VOID : ek_opt = _adapter_opt_fold_void; break; + } + assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && + ek_adapter_opt_collect_count(ek_opt) == -1 && + (ek_adapter_opt_collect_type(ek_opt) == dest || + ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), + ""); + + if (dest == T_OBJECT && elem_slots == 0 && OptimizeMethodHandles) { + // if there are no args, just pretend it's a collect + ek_opt = _adapter_opt_collect_0_ref; + break; + } + + if (dest == T_OBJECT && OptimizeMethodHandles) { + // try to use a fixed length adapter + ek_try = EntryKind(_adapter_opt_fold_1_ref - 1 + elem_slots); + if (ek_try < _adapter_opt_fold_LAST && + ek_adapter_opt_collect_count(ek_try) == elem_slots) { + assert(ek_adapter_opt_collect_slot(ek_try) == -1 && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + + break; + } default: // should have failed much earlier; must be a missing case here @@ -2166,11 +2530,36 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu // and fall through: throw_not_impl: - // FIXME: these adapters are NYI - err = "adapter not yet 
implemented in the JVM"; + if (err == NULL) + err = "unknown adapter type"; break; } + if (err == NULL && (vminfo & CONV_VMINFO_MASK) != vminfo) { + // should not happen, since vminfo is used to encode arg/slot indexes < 255 + err = "vminfo overflow"; + } + + if (err == NULL && !have_entry(ek_opt)) { + err = "adapter stub for this kind of method handle is missing"; + } + + if (err == NULL && ek_opt == ek_orig) { + switch (ek_opt) { + case _adapter_prim_to_prim: + case _adapter_ref_to_prim: + case _adapter_prim_to_ref: + case _adapter_swap_args: + case _adapter_rot_args: + case _adapter_collect_args: + case _adapter_fold_args: + case _adapter_spread_args: + // should be handled completely by optimized cases; see above + err = "init_AdapterMethodHandle should not issue this"; + break; + } + } + if (err != NULL) { throw_InternalError_for_bad_conversion(conversion, err, THREAD); return; @@ -2190,6 +2579,26 @@ void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnu // Java code can publish it in global data structures. } +void MethodHandles::ensure_vmlayout_field(Handle target, TRAPS) { + Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); + Handle mtform(THREAD, java_lang_invoke_MethodType::form(mtype())); + if (mtform.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } + if (java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { + if (java_lang_invoke_MethodTypeForm::vmlayout(mtform()) == NULL) { + // fill it in + Handle erased_mtype(THREAD, java_lang_invoke_MethodTypeForm::erasedType(mtform())); + TempNewSymbol erased_signature + = java_lang_invoke_MethodType::as_signature(erased_mtype(), /*intern:*/true, CHECK); + methodOop cookie + = SystemDictionary::find_method_handle_invoke(vmSymbols::invokeExact_name(), + erased_signature, + SystemDictionaryHandles::Object_klass(), + THREAD); + java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie); + } + } +} + // // Here are the native methods on sun.invoke.MethodHandleImpl. 
// They are the private interface between this JVM and the HotSpot-specific @@ -2360,8 +2769,10 @@ JVM_END #ifndef PRODUCT #define EACH_NAMED_CON(template) \ - template(MethodHandles,GC_JVM_PUSH_LIMIT) \ - template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) \ + /* hold back this one until JDK stabilizes */ \ + /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */ \ + /* hold back this one until JDK stabilizes */ \ + /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \ template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \ template(MethodHandles,ETF_DIRECT_HANDLE) \ template(MethodHandles,ETF_METHOD_NAME) \ @@ -2385,9 +2796,8 @@ JVM_END template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \ template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \ template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \ - template(java_lang_invoke_AdapterMethodHandle,OP_FLYBY) \ - template(java_lang_invoke_AdapterMethodHandle,OP_RICOCHET) \ - template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT) \ + /* hold back this one until JDK stabilizes */ \ + /*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/ \ template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \ template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \ template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ diff --git a/hotspot/src/share/vm/prims/methodHandles.hpp b/hotspot/src/share/vm/prims/methodHandles.hpp index d104783d40f..66de7efe0f4 100644 --- a/hotspot/src/share/vm/prims/methodHandles.hpp +++ b/hotspot/src/share/vm/prims/methodHandles.hpp @@ -66,8 +66,8 @@ class MethodHandles: AllStatic { _adapter_drop_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS, _adapter_collect_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS, _adapter_spread_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS, - _adapter_flyby = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FLYBY, - _adapter_ricochet = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RICOCHET, + _adapter_fold_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS, + _adapter_unused_13 = _adapter_mh_first + 13, //hole in the CONV_OP enumeration _adapter_mh_last = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1, // Optimized adapter types @@ -93,10 +93,99 @@ class MethodHandles: AllStatic { _adapter_opt_unboxi, _adapter_opt_unboxl, - // spreading (array length cases 0, 1, >=2) - _adapter_opt_spread_0, - _adapter_opt_spread_1, - _adapter_opt_spread_more, + // %% Maybe tame the following with a VM_SYMBOLS_DO type macro? + + // how a blocking adapter returns (platform-dependent) + _adapter_opt_return_ref, + _adapter_opt_return_int, + _adapter_opt_return_long, + _adapter_opt_return_float, + _adapter_opt_return_double, + _adapter_opt_return_void, + _adapter_opt_return_S0_ref, // return ref to S=0 (last slot) + _adapter_opt_return_S1_ref, // return ref to S=1 (2nd-to-last slot) + _adapter_opt_return_S2_ref, + _adapter_opt_return_S3_ref, + _adapter_opt_return_S4_ref, + _adapter_opt_return_S5_ref, + _adapter_opt_return_any, // dynamically select r/i/l/f/d + _adapter_opt_return_FIRST = _adapter_opt_return_ref, + _adapter_opt_return_LAST = _adapter_opt_return_any, + + // spreading (array length cases 0, 1, ...) 
+ _adapter_opt_spread_0, // spread empty array to N=0 arguments + _adapter_opt_spread_1_ref, // spread Object[] to N=1 argument + _adapter_opt_spread_2_ref, // spread Object[] to N=2 arguments + _adapter_opt_spread_3_ref, // spread Object[] to N=3 arguments + _adapter_opt_spread_4_ref, // spread Object[] to N=4 arguments + _adapter_opt_spread_5_ref, // spread Object[] to N=5 arguments + _adapter_opt_spread_ref, // spread Object[] to N arguments + _adapter_opt_spread_byte, // spread byte[] or boolean[] to N arguments + _adapter_opt_spread_char, // spread char[], etc., to N arguments + _adapter_opt_spread_short, // spread short[], etc., to N arguments + _adapter_opt_spread_int, // spread int[], short[], etc., to N arguments + _adapter_opt_spread_long, // spread long[] to N arguments + _adapter_opt_spread_float, // spread float[] to N arguments + _adapter_opt_spread_double, // spread double[] to N arguments + _adapter_opt_spread_FIRST = _adapter_opt_spread_0, + _adapter_opt_spread_LAST = _adapter_opt_spread_double, + + // blocking filter/collect conversions + // These collect N arguments and replace them (at slot S) by a return value + // which is passed to the final target, along with the unaffected arguments. + // collect_{N}_{T} collects N arguments at any position into a T value + // collect_{N}_S{S}_{T} collects N arguments at slot S into a T value + // collect_{T} collects any number of arguments at any position + // filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection) + // (collect_2 is also usable as a filter, with long or double arguments) + _adapter_opt_collect_ref, // combine N arguments, replace with a reference + _adapter_opt_collect_int, // combine N arguments, replace with an int, short, etc. + _adapter_opt_collect_long, // combine N arguments, replace with a long + _adapter_opt_collect_float, // combine N arguments, replace with a float + _adapter_opt_collect_double, // combine N arguments, replace with a double + _adapter_opt_collect_void, // combine N arguments, replace with nothing + // if there is a small fixed number to push, do so without a loop: + _adapter_opt_collect_0_ref, // collect N=0 arguments, insert a reference + _adapter_opt_collect_1_ref, // collect N=1 argument, replace with a reference + _adapter_opt_collect_2_ref, // combine N=2 arguments, replace with a reference + _adapter_opt_collect_3_ref, // combine N=3 arguments, replace with a reference + _adapter_opt_collect_4_ref, // combine N=4 arguments, replace with a reference + _adapter_opt_collect_5_ref, // combine N=5 arguments, replace with a reference + // filters are an important special case because they never move arguments: + _adapter_opt_filter_S0_ref, // filter N=1 argument at S=0, replace with a reference + _adapter_opt_filter_S1_ref, // filter N=1 argument at S=1, replace with a reference + _adapter_opt_filter_S2_ref, // filter N=1 argument at S=2, replace with a reference + _adapter_opt_filter_S3_ref, // filter N=1 argument at S=3, replace with a reference + _adapter_opt_filter_S4_ref, // filter N=1 argument at S=4, replace with a reference + _adapter_opt_filter_S5_ref, // filter N=1 argument at S=5, replace with a reference + // these move arguments, but they are important for boxing + _adapter_opt_collect_2_S0_ref, // combine last N=2 arguments, replace with a reference + _adapter_opt_collect_2_S1_ref, // combine N=2 arguments at S=1, replace with a reference + _adapter_opt_collect_2_S2_ref, // combine N=2 arguments at S=2, replace with a reference + 
_adapter_opt_collect_2_S3_ref, // combine N=2 arguments at S=3, replace with a reference + _adapter_opt_collect_2_S4_ref, // combine N=2 arguments at S=4, replace with a reference + _adapter_opt_collect_2_S5_ref, // combine N=2 arguments at S=5, replace with a reference + _adapter_opt_collect_FIRST = _adapter_opt_collect_ref, + _adapter_opt_collect_LAST = _adapter_opt_collect_2_S5_ref, + + // blocking folding conversions + // these are like collects, but retain all the N arguments for the final target + //_adapter_opt_fold_0_ref, // same as _adapter_opt_collect_0_ref + // fold_{N}_{T} processes N arguments at any position into a T value, which it inserts + // fold_{T} processes any number of arguments at any position + _adapter_opt_fold_ref, // process N arguments, prepend a reference + _adapter_opt_fold_int, // process N arguments, prepend an int, short, etc. + _adapter_opt_fold_long, // process N arguments, prepend a long + _adapter_opt_fold_float, // process N arguments, prepend a float + _adapter_opt_fold_double, // process N arguments, prepend a double + _adapter_opt_fold_void, // process N arguments, but leave the list unchanged + _adapter_opt_fold_1_ref, // process N=1 argument, prepend a reference + _adapter_opt_fold_2_ref, // process N=2 arguments, prepend a reference + _adapter_opt_fold_3_ref, // process N=3 arguments, prepend a reference + _adapter_opt_fold_4_ref, // process N=4 arguments, prepend a reference + _adapter_opt_fold_5_ref, // process N=5 arguments, prepend a reference + _adapter_opt_fold_FIRST = _adapter_opt_fold_ref, + _adapter_opt_fold_LAST = _adapter_opt_fold_5_ref, _EK_LIMIT, _EK_FIRST = 0 @@ -110,6 +199,7 @@ class MethodHandles: AllStatic { enum { // import java_lang_invoke_AdapterMethodHandle::CONV_OP_* CONV_OP_LIMIT = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT, CONV_OP_MASK = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK, + CONV_TYPE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_TYPE_MASK, CONV_VMINFO_MASK = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK, CONV_VMINFO_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT, CONV_OP_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT, @@ -123,6 +213,7 @@ class MethodHandles: AllStatic { static MethodHandleEntry* _entries[_EK_LIMIT]; static const char* _entry_names[_EK_LIMIT+1]; static jobject _raise_exception_method; + static address _adapter_return_handlers[CONV_TYPE_MASK+1]; // Adapters. static MethodHandlesAdapterBlob* _adapter_code; @@ -147,39 +238,195 @@ class MethodHandles: AllStatic { } // Some adapter helper functions. 
- static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) { + static EntryKind ek_original_kind(EntryKind ek) { + if (ek <= _adapter_mh_last) return ek; switch (ek) { - case _bound_int_mh : // fall-thru - case _bound_int_direct_mh : arg_type = T_INT; arg_mask = _INSERT_INT_MASK; break; - case _bound_long_mh : // fall-thru - case _bound_long_direct_mh: arg_type = T_LONG; arg_mask = _INSERT_LONG_MASK; break; - case _bound_ref_mh : // fall-thru - case _bound_ref_direct_mh : arg_type = T_OBJECT; arg_mask = _INSERT_REF_MASK; break; - default: ShouldNotReachHere(); + case _adapter_opt_swap_1: + case _adapter_opt_swap_2: + return _adapter_swap_args; + case _adapter_opt_rot_1_up: + case _adapter_opt_rot_1_down: + case _adapter_opt_rot_2_up: + case _adapter_opt_rot_2_down: + return _adapter_rot_args; + case _adapter_opt_i2i: + case _adapter_opt_l2i: + case _adapter_opt_d2f: + case _adapter_opt_i2l: + case _adapter_opt_f2d: + return _adapter_prim_to_prim; + case _adapter_opt_unboxi: + case _adapter_opt_unboxl: + return _adapter_ref_to_prim; } - arg_slots = type2size[arg_type]; + if (ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST) + return _adapter_spread_args; + if (ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST) + return _adapter_collect_args; + if (ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST) + return _adapter_fold_args; + if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST) + return _adapter_opt_return_any; + assert(false, "oob"); + return _EK_LIMIT; } - static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) { - int swap_slots = 0; + static bool ek_supported(MethodHandles::EntryKind ek); + + static BasicType ek_bound_mh_arg_type(EntryKind ek) { switch (ek) { - case _adapter_opt_swap_1: swap_slots = 1; rotate = 0; break; - case _adapter_opt_swap_2: swap_slots = 2; rotate = 0; break; - case _adapter_opt_rot_1_up: swap_slots = 1; rotate = 1; break; - case _adapter_opt_rot_1_down: swap_slots = 1; rotate = -1; break; - case _adapter_opt_rot_2_up: swap_slots = 2; rotate = 1; break; - case _adapter_opt_rot_2_down: swap_slots = 2; rotate = -1; break; - default: ShouldNotReachHere(); + case _bound_int_mh : // fall-thru + case _bound_int_direct_mh : return T_INT; + case _bound_long_mh : // fall-thru + case _bound_long_direct_mh : return T_LONG; + default : return T_OBJECT; } - // Return the size of the stack slots to move in bytes. 
- swap_bytes = swap_slots * Interpreter::stackElementSize; } - static int get_ek_adapter_opt_spread_info(EntryKind ek) { + static int ek_adapter_opt_swap_slots(EntryKind ek) { switch (ek) { - case _adapter_opt_spread_0: return 0; - case _adapter_opt_spread_1: return 1; - default : return -1; + case _adapter_opt_swap_1 : return 1; + case _adapter_opt_swap_2 : return 2; + case _adapter_opt_rot_1_up : return 1; + case _adapter_opt_rot_1_down : return 1; + case _adapter_opt_rot_2_up : return 2; + case _adapter_opt_rot_2_down : return 2; + default : ShouldNotReachHere(); return -1; + } + } + + static int ek_adapter_opt_swap_mode(EntryKind ek) { + switch (ek) { + case _adapter_opt_swap_1 : return 0; + case _adapter_opt_swap_2 : return 0; + case _adapter_opt_rot_1_up : return 1; + case _adapter_opt_rot_1_down : return -1; + case _adapter_opt_rot_2_up : return 1; + case _adapter_opt_rot_2_down : return -1; + default : ShouldNotReachHere(); return 0; + } + } + + static int ek_adapter_opt_collect_count(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); + switch (ek) { + case _adapter_opt_collect_0_ref : return 0; + case _adapter_opt_filter_S0_ref : + case _adapter_opt_filter_S1_ref : + case _adapter_opt_filter_S2_ref : + case _adapter_opt_filter_S3_ref : + case _adapter_opt_filter_S4_ref : + case _adapter_opt_filter_S5_ref : + case _adapter_opt_fold_1_ref : + case _adapter_opt_collect_1_ref : return 1; + case _adapter_opt_collect_2_S0_ref : + case _adapter_opt_collect_2_S1_ref : + case _adapter_opt_collect_2_S2_ref : + case _adapter_opt_collect_2_S3_ref : + case _adapter_opt_collect_2_S4_ref : + case _adapter_opt_collect_2_S5_ref : + case _adapter_opt_fold_2_ref : + case _adapter_opt_collect_2_ref : return 2; + case _adapter_opt_fold_3_ref : + case _adapter_opt_collect_3_ref : return 3; + case _adapter_opt_fold_4_ref : + case _adapter_opt_collect_4_ref : return 4; + case _adapter_opt_fold_5_ref : + case _adapter_opt_collect_5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static int ek_adapter_opt_collect_slot(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); + switch (ek) { + case _adapter_opt_collect_2_S0_ref : + case _adapter_opt_filter_S0_ref : return 0; + case _adapter_opt_collect_2_S1_ref : + case _adapter_opt_filter_S1_ref : return 1; + case _adapter_opt_collect_2_S2_ref : + case _adapter_opt_filter_S2_ref : return 2; + case _adapter_opt_collect_2_S3_ref : + case _adapter_opt_filter_S3_ref : return 3; + case _adapter_opt_collect_2_S4_ref : + case _adapter_opt_filter_S4_ref : return 4; + case _adapter_opt_collect_2_S5_ref : + case _adapter_opt_filter_S5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static BasicType ek_adapter_opt_collect_type(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); + switch (ek) { + case _adapter_opt_fold_int : + case _adapter_opt_collect_int : return T_INT; + case _adapter_opt_fold_long : + case _adapter_opt_collect_long : return T_LONG; + case _adapter_opt_fold_float : + case _adapter_opt_collect_float : return T_FLOAT; + case _adapter_opt_fold_double : + case _adapter_opt_collect_double : return T_DOUBLE; + case _adapter_opt_fold_void : + case 
_adapter_opt_collect_void : return T_VOID; + default : return T_OBJECT; + } + } + + static int ek_adapter_opt_return_slot(EntryKind ek) { + assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); + switch (ek) { + case _adapter_opt_return_S0_ref : return 0; + case _adapter_opt_return_S1_ref : return 1; + case _adapter_opt_return_S2_ref : return 2; + case _adapter_opt_return_S3_ref : return 3; + case _adapter_opt_return_S4_ref : return 4; + case _adapter_opt_return_S5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static BasicType ek_adapter_opt_return_type(EntryKind ek) { + assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); + switch (ek) { + case _adapter_opt_return_int : return T_INT; + case _adapter_opt_return_long : return T_LONG; + case _adapter_opt_return_float : return T_FLOAT; + case _adapter_opt_return_double : return T_DOUBLE; + case _adapter_opt_return_void : return T_VOID; + case _adapter_opt_return_any : return T_CONFLICT; // sentinel value for "variable" + default : return T_OBJECT; + } + } + + static int ek_adapter_opt_spread_count(EntryKind ek) { + assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); + switch (ek) { + case _adapter_opt_spread_0 : return 0; + case _adapter_opt_spread_1_ref : return 1; + case _adapter_opt_spread_2_ref : return 2; + case _adapter_opt_spread_3_ref : return 3; + case _adapter_opt_spread_4_ref : return 4; + case _adapter_opt_spread_5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static BasicType ek_adapter_opt_spread_type(EntryKind ek) { + assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); + switch (ek) { + // (there is no _adapter_opt_spread_boolean; we use byte) + case _adapter_opt_spread_byte : return T_BYTE; + case _adapter_opt_spread_char : return T_CHAR; + case _adapter_opt_spread_short : return T_SHORT; + case _adapter_opt_spread_int : return T_INT; + case _adapter_opt_spread_long : return T_LONG; + case _adapter_opt_spread_float : return T_FLOAT; + case _adapter_opt_spread_double : return T_DOUBLE; + default : return T_OBJECT; } } @@ -228,12 +475,21 @@ class MethodHandles: AllStatic { // Bit mask of conversion_op values. May vary by platform. static int adapter_conversion_ops_supported_mask(); + static bool conv_op_supported(int conv_op) { + assert(conv_op_valid(conv_op), ""); + return ((adapter_conversion_ops_supported_mask() & nth_bit(conv_op)) != 0); + } + // Offset in words that the interpreter stack pointer moves when an argument is pushed. // The stack_move value must always be a multiple of this. static int stack_move_unit() { return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords; } + // Adapter frame traversal. (Implementation-specific.) + static frame ricochet_frame_sender(const frame& fr, RegisterMap* reg_map); + static void ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map); + enum { CONV_VMINFO_SIGN_FLAG = 0x80 }; // Shift values for prim-to-prim conversions. static int adapter_prim_to_prim_subword_vminfo(BasicType dest) { @@ -429,6 +685,7 @@ class MethodHandles: AllStatic { // Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.) 
static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS); + static void ensure_vmlayout_field(Handle target, TRAPS); #ifdef ASSERT static bool spot_check_entry_names(); @@ -441,6 +698,8 @@ class MethodHandles: AllStatic { KlassHandle receiver_klass, TRAPS); +public: + static bool is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst); static bool same_basic_type_for_arguments(BasicType src, BasicType dst, bool raw = false, bool for_return = false); @@ -448,12 +707,54 @@ class MethodHandles: AllStatic { return same_basic_type_for_arguments(src, dst, raw, true); } - enum { // arg_mask values + static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); + +#ifdef TARGET_ARCH_x86 +# include "methodHandles_x86.hpp" +#endif +#ifdef TARGET_ARCH_sparc +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_sparc.hpp" +#endif +#ifdef TARGET_ARCH_zero +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_zero.hpp" +#endif +#ifdef TARGET_ARCH_arm +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_arm.hpp" +#endif +#ifdef TARGET_ARCH_ppc +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_ppc.hpp" +#endif + +#ifdef TARGET_ARCH_NYI_6939861 + // Here are some backward compatible declarations until the 6939861 ports are updated. + #define _adapter_flyby (_EK_LIMIT + 10) + #define _adapter_ricochet (_EK_LIMIT + 11) + #define _adapter_opt_spread_1 _adapter_opt_spread_1_ref + #define _adapter_opt_spread_more _adapter_opt_spread_ref + enum { _INSERT_NO_MASK = -1, _INSERT_REF_MASK = 0, _INSERT_INT_MASK = 1, _INSERT_LONG_MASK = 3 }; + static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) { + arg_type = ek_bound_mh_arg_type(ek); + arg_mask = 0; + arg_slots = type2size[arg_type];; + } + static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) { + int swap_slots = ek_adapter_opt_swap_slots(ek); + rotate = ek_adapter_opt_swap_mode(ek); + swap_bytes = swap_slots * Interpreter::stackElementSize; + } + static int get_ek_adapter_opt_spread_info(EntryKind ek) { + return ek_adapter_opt_spread_count(ek); + } + static void insert_arg_slots(MacroAssembler* _masm, RegisterOrConstant arg_slots, int arg_mask, @@ -466,8 +767,7 @@ class MethodHandles: AllStatic { Register temp_reg, Register temp2_reg, Register temp3_reg = noreg); static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; - - static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); +#endif //TARGET_ARCH_NYI_6939861 }; diff --git a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp index e195ff4854d..b761e51a511 100644 --- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp @@ -1,7 +1,26 @@ /* -* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. -* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. -*/ + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ #include "precompiled.hpp" #include "runtime/advancedThresholdPolicy.hpp" diff --git a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp index 1cab763e034..c1ebd0dc4e6 100644 --- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp +++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp @@ -1,7 +1,26 @@ /* -* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. -* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. -*/ + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ #ifndef SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP #define SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP diff --git a/hotspot/src/share/vm/runtime/deoptimization.cpp b/hotspot/src/share/vm/runtime/deoptimization.cpp index 6313c547609..b2f172d1d70 100644 --- a/hotspot/src/share/vm/runtime/deoptimization.cpp +++ b/hotspot/src/share/vm/runtime/deoptimization.cpp @@ -90,12 +90,14 @@ bool DeoptimizationMarker::_is_active = false; Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame, int caller_adjustment, + int caller_actual_parameters, int number_of_frames, intptr_t* frame_sizes, address* frame_pcs, BasicType return_type) { _size_of_deoptimized_frame = size_of_deoptimized_frame; _caller_adjustment = caller_adjustment; + _caller_actual_parameters = caller_actual_parameters; _number_of_frames = number_of_frames; _frame_sizes = frame_sizes; _frame_pcs = frame_pcs; @@ -373,6 +375,28 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words()); } + // Find the current pc for sender of the deoptee. 
Since the sender may have been deoptimized + // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather + // than simply use array->sender.pc(). This requires us to walk the current set of frames + // + frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame + deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller + + // It's possible that the number of paramters at the call site is + // different than number of arguments in the callee when method + // handles are used. If the caller is interpreted get the real + // value so that the proper amount of space can be added to it's + // frame. + int caller_actual_parameters = callee_parameters; + if (deopt_sender.is_interpreted_frame()) { + methodHandle method = deopt_sender.interpreter_frame_method(); + Bytecode_invoke cur = Bytecode_invoke_check(method, + deopt_sender.interpreter_frame_bci()); + Symbol* signature = method->constants()->signature_ref_at(cur.index()); + ArgumentSizeComputer asc(signature); + caller_actual_parameters = asc.size() + (cur.has_receiver() ? 1 : 0); + } + // // frame_sizes/frame_pcs[0] oldest frame (int or c2i) // frame_sizes/frame_pcs[1] next oldest frame (int) @@ -391,7 +415,13 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread // frame[number_of_frames - 1 ] = on_stack_size(youngest) // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest)) // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest))) - frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters, + int caller_parms = callee_parameters; + if (index == array->frames() - 1) { + // Use the value from the interpreted caller + caller_parms = caller_actual_parameters; + } + frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms, + callee_parameters, callee_locals, index == 0, popframe_extra_args); @@ -418,28 +448,6 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread // Compute information for handling adapters and adjusting the frame size of the caller. int caller_adjustment = 0; - // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized - // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather - // than simply use array->sender.pc(). This requires us to walk the current set of frames - // - frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame - deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller - - // It's possible that the number of paramters at the call site is - // different than number of arguments in the callee when method - // handles are used. If the caller is interpreted get the real - // value so that the proper amount of space can be added to it's - // frame. - int sender_callee_parameters = callee_parameters; - if (deopt_sender.is_interpreted_frame()) { - methodHandle method = deopt_sender.interpreter_frame_method(); - Bytecode_invoke cur = Bytecode_invoke_check(method, - deopt_sender.interpreter_frame_bci()); - Symbol* signature = method->constants()->signature_ref_at(cur.index()); - ArgumentSizeComputer asc(signature); - sender_callee_parameters = asc.size() + (cur.has_receiver() ? 1 : 0); - } - // Compute the amount the oldest interpreter frame will have to adjust // its caller's stack by. 
If the caller is a compiled frame then // we pretend that the callee has no parameters so that the @@ -454,11 +462,11 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread if (deopt_sender.is_compiled_frame()) { caller_adjustment = last_frame_adjust(0, callee_locals); - } else if (callee_locals > sender_callee_parameters) { + } else if (callee_locals > caller_actual_parameters) { // The caller frame may need extending to accommodate // non-parameter locals of the first unpacked interpreted frame. // Compute that adjustment. - caller_adjustment = last_frame_adjust(sender_callee_parameters, callee_locals); + caller_adjustment = last_frame_adjust(caller_actual_parameters, callee_locals); } // If the sender is deoptimized the we must retrieve the address of the handler @@ -473,6 +481,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, caller_adjustment * BytesPerWord, + caller_actual_parameters, number_of_frames, frame_sizes, frame_pcs, @@ -570,7 +579,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m UnrollBlock* info = array->unroll_block(); // Unpack the interpreter frames and any adapter frame (c2 only) we might create. - array->unpack_to_stack(stub_frame, exec_mode); + array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters()); BasicType bt = info->return_type(); diff --git a/hotspot/src/share/vm/runtime/deoptimization.hpp b/hotspot/src/share/vm/runtime/deoptimization.hpp index 7f6c8c37056..853cd9a017e 100644 --- a/hotspot/src/share/vm/runtime/deoptimization.hpp +++ b/hotspot/src/share/vm/runtime/deoptimization.hpp @@ -138,6 +138,9 @@ class Deoptimization : AllStatic { intptr_t* _register_block; // Block for storing callee-saved registers. BasicType _return_type; // Tells if we have to restore double or long return value intptr_t _initial_fp; // FP of the sender frame + int _caller_actual_parameters; // The number of actual arguments at the + // interpreted caller of the deoptimized frame + // The following fields are used as temps during the unpacking phase // (which is tight on registers, especially on x86). They really ought // to be PD variables but that involves moving this class into its own @@ -149,6 +152,7 @@ class Deoptimization : AllStatic { // Constructor UnrollBlock(int size_of_deoptimized_frame, int caller_adjustment, + int caller_actual_parameters, int number_of_frames, intptr_t* frame_sizes, address* frames_pcs, @@ -168,6 +172,8 @@ class Deoptimization : AllStatic { void set_initial_fp(intptr_t fp) { _initial_fp = fp; } + int caller_actual_parameters() const { return _caller_actual_parameters; } + // Accessors used by the code generator for the unpack stub. 
static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); } static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); } diff --git a/hotspot/src/share/vm/runtime/frame.cpp b/hotspot/src/share/vm/runtime/frame.cpp index b5352fc00d6..e55543311e2 100644 --- a/hotspot/src/share/vm/runtime/frame.cpp +++ b/hotspot/src/share/vm/runtime/frame.cpp @@ -33,6 +33,7 @@ #include "oops/methodOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" +#include "prims/methodHandles.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -169,6 +170,11 @@ void frame::set_pc(address newpc ) { } // type testers +bool frame::is_ricochet_frame() const { + RicochetBlob* rcb = SharedRuntime::ricochet_blob(); + return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc)); +} + bool frame::is_deoptimized_frame() const { assert(_deopt_state != unknown, "not answerable"); return _deopt_state == is_deoptimized; @@ -341,12 +347,18 @@ frame frame::java_sender() const { frame frame::real_sender(RegisterMap* map) const { frame result = sender(map); - while (result.is_runtime_frame()) { + while (result.is_runtime_frame() || + result.is_ricochet_frame()) { result = result.sender(map); } return result; } +frame frame::sender_for_ricochet_frame(RegisterMap* map) const { + assert(is_ricochet_frame(), ""); + return MethodHandles::ricochet_frame_sender(*this, map); +} + // Note: called by profiler - NOT for current thread frame frame::profile_find_Java_sender_frame(JavaThread *thread) { // If we don't recognize this frame, walk back up the stack until we do @@ -529,6 +541,7 @@ jint frame::interpreter_frame_expression_stack_size() const { const char* frame::print_name() const { if (is_native_frame()) return "Native"; if (is_interpreted_frame()) return "Interpreted"; + if (is_ricochet_frame()) return "Ricochet"; if (is_compiled_frame()) { if (is_deoptimized_frame()) return "Deoptimized"; return "Compiled"; @@ -715,6 +728,8 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); } else if (_cb->is_deoptimization_stub()) { st->print("v ~DeoptimizationBlob"); + } else if (_cb->is_ricochet_stub()) { + st->print("v ~RichochetBlob"); } else if (_cb->is_exception_stub()) { st->print("v ~ExceptionBlob"); } else if (_cb->is_safepoint_stub()) { @@ -978,6 +993,9 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { assert(_cb != NULL, "sanity check"); + if (_cb == SharedRuntime::ricochet_blob()) { + oops_ricochet_do(f, reg_map); + } if (_cb->oop_maps() != NULL) { OopMapSet::oops_do(this, reg_map, f); @@ -996,6 +1014,11 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const Register cf->do_code_blob(_cb); } +void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) { + assert(is_ricochet_frame(), ""); + MethodHandles::ricochet_frame_oops_do(*this, f, map); +} + class CompiledArgumentOopFinder: public SignatureInfo { protected: OopClosure* _f; @@ -1400,7 +1423,7 @@ void FrameValues::describe(int owner, intptr_t* location, const char* descriptio } -bool FrameValues::validate() { +void FrameValues::validate() { _values.sort(compare); bool error = false; FrameValue prev; @@ -1423,19 +1446,32 @@ bool FrameValues::validate() { 
prev = fv; } } - return error; + assert(!error, "invalid layout"); } void FrameValues::print() { _values.sort(compare); - intptr_t* v0 = _values.at(0).location; - intptr_t* v1 = _values.at(_values.length() - 1).location; + JavaThread* thread = JavaThread::current(); + + // Sometimes values like the fp can be invalid values if the + // register map wasn't updated during the walk. Trim out values + // that aren't actually in the stack of the thread. + int min_index = 0; + int max_index = _values.length() - 1; + intptr_t* v0 = _values.at(min_index).location; + while (!thread->is_in_stack((address)v0)) { + v0 = _values.at(++min_index).location; + } + intptr_t* v1 = _values.at(max_index).location; + while (!thread->is_in_stack((address)v1)) { + v1 = _values.at(--max_index).location; + } intptr_t* min = MIN2(v0, v1); intptr_t* max = MAX2(v0, v1); intptr_t* cur = max; intptr_t* last = NULL; - for (int i = _values.length() - 1; i >= 0; i--) { + for (int i = max_index; i >= min_index; i--) { FrameValue fv = _values.at(i); while (cur > fv.location) { tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur); diff --git a/hotspot/src/share/vm/runtime/frame.hpp b/hotspot/src/share/vm/runtime/frame.hpp index 66d17a9f76e..0273edf928f 100644 --- a/hotspot/src/share/vm/runtime/frame.hpp +++ b/hotspot/src/share/vm/runtime/frame.hpp @@ -135,6 +135,7 @@ class frame VALUE_OBJ_CLASS_SPEC { bool is_interpreted_frame() const; bool is_java_frame() const; bool is_entry_frame() const; // Java frame called from C? + bool is_ricochet_frame() const; bool is_native_frame() const; bool is_runtime_frame() const; bool is_compiled_frame() const; @@ -175,6 +176,7 @@ class frame VALUE_OBJ_CLASS_SPEC { // Helper methods for better factored code in frame::sender frame sender_for_compiled_frame(RegisterMap* map) const; frame sender_for_entry_frame(RegisterMap* map) const; + frame sender_for_ricochet_frame(RegisterMap* map) const; frame sender_for_interpreter_frame(RegisterMap* map) const; frame sender_for_native_frame(RegisterMap* map) const; @@ -400,6 +402,7 @@ class frame VALUE_OBJ_CLASS_SPEC { // Oops-do's void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f); void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true); + void oops_ricochet_do(OopClosure* f, const RegisterMap* map); private: void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); @@ -508,7 +511,7 @@ class FrameValues { // Used by frame functions to describe locations. 
void describe(int owner, intptr_t* location, const char* description, int priority = 0); - bool validate(); + void validate(); void print(); }; diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp index 9aa96dc1218..f0a50abb7f8 100644 --- a/hotspot/src/share/vm/runtime/globals.hpp +++ b/hotspot/src/share/vm/runtime/globals.hpp @@ -1460,8 +1460,10 @@ class CommandLineFlags { product(intx, ParallelGCBufferWastePct, 10, \ "wasted fraction of parallel allocation buffer.") \ \ - product(bool, ParallelGCRetainPLAB, true, \ - "Retain parallel allocation buffers across scavenges.") \ + diagnostic(bool, ParallelGCRetainPLAB, false, \ + "Retain parallel allocation buffers across scavenges; " \ + " -- disabled because this currently conflicts with " \ + " parallel card scanning under certain conditions ") \ \ product(intx, TargetPLABWastePct, 10, \ "target wasted space in last buffer as pct of overall allocation")\ @@ -1495,7 +1497,15 @@ class CommandLineFlags { product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ "The desired number of objects to claim from the overflow list") \ \ - product(uintx, CMSParPromoteBlocksToClaim, 16, \ + diagnostic(intx, ParGCStridesPerThread, 2, \ + "The number of strides per worker thread that we divide up the " \ + "card table scanning work into") \ + \ + diagnostic(intx, ParGCCardsPerStrideChunk, 256, \ + "The number of cards in each chunk of the parallel chunks used " \ + "during card table scanning") \ + \ + product(uintx, CMSParPromoteBlocksToClaim, 16, \ "Number of blocks to attempt to claim when refilling CMS LAB for "\ "parallel GC.") \ \ @@ -3708,6 +3718,10 @@ class CommandLineFlags { diagnostic(bool, OptimizeMethodHandles, true, \ "when constructing method handles, try to improve them") \ \ + diagnostic(bool, UseRicochetFrames, true, \ + "use ricochet stack frames for method handle combination, " \ + "if the platform supports them") \ + \ experimental(bool, TrustFinalNonStaticFields, false, \ "trust final non-static declarations for constant folding") \ \ diff --git a/hotspot/src/share/vm/runtime/sharedRuntime.cpp b/hotspot/src/share/vm/runtime/sharedRuntime.cpp index f60b2f07029..d13b734caf6 100644 --- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp +++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp @@ -88,6 +88,8 @@ HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int, HS_DTRACE_PROBE_DECL7(hotspot, method__return, int, char*, int, char*, int, char*, int); +RicochetBlob* SharedRuntime::_ricochet_blob = NULL; + // Implementation of SharedRuntime #ifndef PRODUCT @@ -460,6 +462,10 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre if (Interpreter::contains(return_address)) { return Interpreter::rethrow_exception_entry(); } + // Ricochet frame unwind code + if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) { + return SharedRuntime::ricochet_blob()->exception_addr(); + } guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); @@ -1174,6 +1180,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* assert(stub_frame.is_runtime_frame(), "sanity check"); frame caller_frame = stub_frame.sender(®_map); assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); + assert(!caller_frame.is_ricochet_frame(), 
"unexpected frame"); #endif /* ASSERT */ methodHandle callee_method; @@ -1222,6 +1229,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread)) if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() || + caller_frame.is_ricochet_frame() || is_mh_invoke_via_adapter) { methodOop callee = thread->callee_target(); guarantee(callee != NULL && callee->is_method(), "bad handshake"); diff --git a/hotspot/src/share/vm/runtime/sharedRuntime.hpp b/hotspot/src/share/vm/runtime/sharedRuntime.hpp index 268bd3a2f4e..f1c5c7f08c9 100644 --- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp +++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp @@ -58,6 +58,8 @@ class SharedRuntime: AllStatic { static RuntimeStub* _resolve_virtual_call_blob; static RuntimeStub* _resolve_static_call_blob; + static RicochetBlob* _ricochet_blob; + static SafepointBlob* _polling_page_safepoint_handler_blob; static SafepointBlob* _polling_page_return_handler_blob; #ifdef COMPILER2 @@ -213,6 +215,16 @@ class SharedRuntime: AllStatic { return _resolve_static_call_blob->entry_point(); } + static RicochetBlob* ricochet_blob() { +#ifdef X86 + // Currently only implemented on x86 + assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops"); +#endif + return _ricochet_blob; + } + + static void generate_ricochet_blob(); + static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; } diff --git a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp index 4d23753521f..fae5312622a 100644 --- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it diff --git a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp index 527c3f60b59..bc392c84b58 100644 --- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp +++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it diff --git a/hotspot/src/share/vm/runtime/vframeArray.cpp b/hotspot/src/share/vm/runtime/vframeArray.cpp index 2a16535310a..52b08099922 100644 --- a/hotspot/src/share/vm/runtime/vframeArray.cpp +++ b/hotspot/src/share/vm/runtime/vframeArray.cpp @@ -154,7 +154,8 @@ void vframeArrayElement::fill_in(compiledVFrame* vf) { int unpack_counter = 0; -void vframeArrayElement::unpack_on_stack(int callee_parameters, +void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, + int callee_parameters, int callee_locals, frame* caller, bool is_top_frame, @@ -270,6 +271,7 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters, temps + callee_parameters, popframe_preserved_args_size_in_words, locks, + caller_actual_parameters, callee_parameters, callee_locals, caller, @@ -415,7 +417,8 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters, } -int vframeArrayElement::on_stack_size(int callee_parameters, +int vframeArrayElement::on_stack_size(int caller_actual_parameters, + int callee_parameters, int callee_locals, bool is_top_frame, int popframe_extra_stack_expression_els) const { @@ -426,6 +429,7 @@ int vframeArrayElement::on_stack_size(int callee_parameters, temps + callee_parameters, popframe_extra_stack_expression_els, locks, + caller_actual_parameters, callee_parameters, callee_locals, is_top_frame); @@ -496,7 +500,7 @@ void vframeArray::fill_in(JavaThread* thread, } } -void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) { +void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) { // stack picture // unpack_frame // [new interpreter frames ] (frames are skeletal but walkable) @@ -525,7 +529,8 @@ void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) { for (index = frames() - 1; index >= 0 ; index--) { int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters(); int callee_locals = index == 0 ? 
0 : element(index-1)->method()->max_locals(); - element(index)->unpack_on_stack(callee_parameters, + element(index)->unpack_on_stack(caller_actual_parameters, + callee_parameters, callee_locals, &caller_frame, index == 0, @@ -534,6 +539,7 @@ void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) { Deoptimization::unwind_callee_save_values(element(index)->iframe(), this); } caller_frame = *element(index)->iframe(); + caller_actual_parameters = callee_parameters; } diff --git a/hotspot/src/share/vm/runtime/vframeArray.hpp b/hotspot/src/share/vm/runtime/vframeArray.hpp index c45dfb73aa9..7072d22e074 100644 --- a/hotspot/src/share/vm/runtime/vframeArray.hpp +++ b/hotspot/src/share/vm/runtime/vframeArray.hpp @@ -83,13 +83,15 @@ class vframeArrayElement : public _ValueObj { // Returns the on stack word size for this frame // callee_parameters is the number of callee locals residing inside this frame - int on_stack_size(int callee_parameters, + int on_stack_size(int caller_actual_parameters, + int callee_parameters, int callee_locals, bool is_top_frame, int popframe_extra_stack_expression_els) const; // Unpacks the element to skeletal interpreter frame - void unpack_on_stack(int callee_parameters, + void unpack_on_stack(int caller_actual_parameters, + int callee_parameters, int callee_locals, frame* caller, bool is_top_frame, @@ -190,7 +192,7 @@ class vframeArray: public CHeapObj { int frame_size() const { return _frame_size; } // Unpack the array on the stack passed in stack interval - void unpack_to_stack(frame &unpack_frame, int exec_mode); + void unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters); // Deallocates monitor chunks allocated during deoptimization. // This should be called when the array is not used anymore. diff --git a/hotspot/src/share/vm/services/heapDumper.cpp b/hotspot/src/share/vm/services/heapDumper.cpp index 0252ddfff7c..46df8b721da 100644 --- a/hotspot/src/share/vm/services/heapDumper.cpp +++ b/hotspot/src/share/vm/services/heapDumper.cpp @@ -1649,6 +1649,9 @@ int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) { if (fr->is_entry_frame()) { last_entry_frame = fr; } + if (fr->is_ricochet_frame()) { + fr->oops_ricochet_do(&blk, vf->register_map()); + } } vf = vf->sender(); } diff --git a/hotspot/test/compiler/7042153/Test7042153.java b/hotspot/test/compiler/7042153/Test7042153.java new file mode 100644 index 00000000000..4319b3b2036 --- /dev/null +++ b/hotspot/test/compiler/7042153/Test7042153.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7042153 + * @summary Bad folding of IfOps with unloaded constant arguments in C1 + * + * @run main/othervm -Xcomp Test7042153 + */ + +import java.lang.reflect.*; + +public class Test7042153 { + static public class Bar { } + static public class Foo { } + + static volatile boolean z; + public static void main(String [] args) { + Class cx = Bar.class; + Class cy = Foo.class; + z = (cx == cy); + } +} diff --git a/jaxp/.hgtags b/jaxp/.hgtags index a61349e7ee7..ddd628c1171 100644 --- a/jaxp/.hgtags +++ b/jaxp/.hgtags @@ -116,3 +116,4 @@ be3758943770a0a3dd4be6a1cb4063507c4d7062 jdk7-b138 28c7c0ed2444607829ba11ad827f8d52197a2830 jdk7-b139 c8136fd161c83917f87e93b14fa2ba3483f9be83 jdk7-b140 e1b5ef243445bf836d095fd44866e1771ef99374 jdk7-b141 +7d067af4b25e4b7e6b28bef48527d67f8650e6c5 jdk7-b142 diff --git a/jaxws/.hgtags b/jaxws/.hgtags index 9fc1479b23f..96c0d88cb82 100644 --- a/jaxws/.hgtags +++ b/jaxws/.hgtags @@ -116,3 +116,4 @@ cc956c8a8255583535597e9a63db23c510e9a063 jdk7-b138 c025078c8362076503bb83b8e4da14ba7b347940 jdk7-b139 82a9022c4f21b1313023c8303b557a17c4106701 jdk7-b140 66826b0aec5a1834da3821c35cf85ac154e9b04d jdk7-b141 +0ef3ef823c39eee3d670e58027c3219cb66f0283 jdk7-b142 diff --git a/jaxws/jaxws.properties b/jaxws/jaxws.properties index cfae88e1bc1..a1544f4bb93 100644 --- a/jaxws/jaxws.properties +++ b/jaxws/jaxws.properties @@ -25,8 +25,8 @@ drops.master.copy.base=${drops.dir} -jaxws_src.bundle.name=jdk7-jaxws2_2_4-b01-2011_04_08.zip -jaxws_src.bundle.md5.checksum=9f35dd731c99ddb62db650aaf20e5bf4 +jaxws_src.bundle.name=jdk7-jaxws2_2_4-b02-2011_05_09.zip +jaxws_src.bundle.md5.checksum=0972a58090e8b2735d91a2f5d1ae1964 jaxws_src.master.bundle.dir=${drops.master.copy.base} jaxws_src.master.bundle.url.base=http://download.java.net/glassfish/components/jax-ws/openjdk/jdk7 diff --git a/jdk/.hgtags b/jdk/.hgtags index 8a7ff58323d..ebd78b31459 100644 --- a/jdk/.hgtags +++ b/jdk/.hgtags @@ -116,3 +116,4 @@ aa13e7702cd9d8aca9aa38f1227f966990866944 jdk7-b136 d80954a89b49fda47c0c5cace65a17f5a758b8bd jdk7-b139 9315c733fb17ddfb9fb44be7e0ffea37bf3c727d jdk7-b140 63eeefe118da18c75ba3d36266768cd1ccaaca6b jdk7-b141 +312612e89ece62633f4809706dec00bcd5fe7c2d jdk7-b142 diff --git a/jdk/make/com/sun/nio/sctp/FILES_java.gmk b/jdk/make/com/sun/nio/sctp/FILES_java.gmk index 9d9c1a98e91..725131e7280 100644 --- a/jdk/make/com/sun/nio/sctp/FILES_java.gmk +++ b/jdk/make/com/sun/nio/sctp/FILES_java.gmk @@ -38,7 +38,7 @@ FILES_java = \ com/sun/nio/sctp/SctpMultiChannel.java \ com/sun/nio/sctp/SctpServerChannel.java \ com/sun/nio/sctp/SctpSocketOption.java \ - com/sun/nio/sctp/SctpStandardSocketOption.java \ + com/sun/nio/sctp/SctpStandardSocketOptions.java \ com/sun/nio/sctp/SendFailedNotification.java \ com/sun/nio/sctp/ShutdownNotification.java \ \ diff --git a/jdk/make/java/management/mapfile-vers b/jdk/make/java/management/mapfile-vers index 1fb36b78c49..21b92d45f9e 100644 --- a/jdk/make/java/management/mapfile-vers +++ b/jdk/make/java/management/mapfile-vers @@ -49,6 +49,7 @@ SUNWprivate_1.1 { Java_sun_management_Flag_setStringValue; Java_sun_management_GarbageCollectorImpl_getCollectionCount; Java_sun_management_GarbageCollectorImpl_getCollectionTime; + Java_sun_management_GarbageCollectorImpl_setNotificationEnabled; Java_sun_management_GcInfoBuilder_fillGcAttributeInfo; 
Java_sun_management_GcInfoBuilder_getLastGcInfo0; Java_sun_management_GcInfoBuilder_getNumGcExtAttributes; diff --git a/jdk/make/java/nio/FILES_java.gmk b/jdk/make/java/nio/FILES_java.gmk index d09a966db38..41397cacdc0 100644 --- a/jdk/make/java/nio/FILES_java.gmk +++ b/jdk/make/java/nio/FILES_java.gmk @@ -71,7 +71,7 @@ FILES_src = \ java/nio/charset/CoderMalfunctionError.java \ java/nio/charset/CodingErrorAction.java \ java/nio/charset/MalformedInputException.java \ - java/nio/charset/StandardCharset.java \ + java/nio/charset/StandardCharsets.java \ java/nio/charset/UnmappableCharacterException.java \ \ java/nio/charset/spi/CharsetProvider.java \ @@ -116,7 +116,7 @@ FILES_src = \ java/nio/file/SimpleFileVisitor.java \ java/nio/file/StandardCopyOption.java \ java/nio/file/StandardOpenOption.java \ - java/nio/file/StandardWatchEventKind.java \ + java/nio/file/StandardWatchEventKinds.java \ java/nio/file/TempFileHelper.java \ java/nio/file/WatchEvent.java \ java/nio/file/WatchKey.java \ diff --git a/jdk/make/sun/rmi/rmi/Makefile b/jdk/make/sun/rmi/rmi/Makefile index 35e4163c5a8..127777229e4 100644 --- a/jdk/make/sun/rmi/rmi/Makefile +++ b/jdk/make/sun/rmi/rmi/Makefile @@ -85,16 +85,21 @@ REMOTE_impls = \ sun.rmi.registry.RegistryImpl \ sun.rmi.transport.DGCImpl -ifeq ($(PLATFORM), windows) -build: stubs -else # PLATFORM -ifneq ($(ARCH_DATA_MODEL), 32) -build: stubs -else # ARCH_DATA_MODEL -build: stubs bin +# +# The java-rmi.cgi script in bin/ only gets delivered in certain situations +# +BUILD_TARGETS = stubs +ifeq ($(PLATFORM), linux) + BUILD_TARGETS += bin endif +ifeq ($(PLATFORM), solaris) + ifeq ($(ARCH_DATA_MODEL), 32) + BUILD_TARGETS += bin + endif endif +build: $(BUILD_TARGETS) + clean clobber:: bin.clean diff --git a/jdk/make/sun/xawt/mapfile-vers b/jdk/make/sun/xawt/mapfile-vers index 096bfe55326..8a12b69c314 100644 --- a/jdk/make/sun/xawt/mapfile-vers +++ b/jdk/make/sun/xawt/mapfile-vers @@ -158,7 +158,6 @@ SUNWprivate_1.1 { Java_sun_awt_X11_XRobotPeer_mouseReleaseImpl; Java_sun_awt_X11_XRobotPeer_mouseWheelImpl; Java_sun_awt_X11_XRobotPeer_setup; - Java_sun_awt_X11_XRobotPeer__1dispose; Java_sun_awt_X11_XToolkit_getNumberOfButtonsImpl; Java_java_awt_Component_initIDs; Java_java_awt_Container_initIDs; diff --git a/jdk/src/share/classes/com/sun/management/GarbageCollectionNotificationInfo.java b/jdk/src/share/classes/com/sun/management/GarbageCollectionNotificationInfo.java new file mode 100644 index 00000000000..64fb94b3fb8 --- /dev/null +++ b/jdk/src/share/classes/com/sun/management/GarbageCollectionNotificationInfo.java @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.sun.management; + +import javax.management.Notification; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataView; +import javax.management.openmbean.CompositeType; +import java.util.Collection; +import java.util.Collections; +import sun.management.GarbageCollectionNotifInfoCompositeData; + +/** + * The information about a garbage collection + * + *

+ * <p>
+ * A garbage collection notification is emitted by {@link GarbageCollectorMXBean}
+ * when the Java virtual machine completes a garbage collection action.
+ * The notification emitted will contain the garbage collection notification
+ * information about the status of the memory:
+ * <ul>
+ *   <li>The name of the garbage collector used to perform the collection.</li>
+ *   <li>The action performed by the garbage collector.</li>
+ *   <li>The cause of the garbage collection action.</li>
+ *   <li>A {@link GcInfo} object containing some statistics about the GC cycle
+ *       (start time, end time) and the memory usage before and after
+ *       the GC cycle.</li>
+ * </ul>

+ * <p>
+ * A {@link CompositeData CompositeData} representing
+ * the {@code GarbageCollectionNotificationInfo} object
+ * is stored in the
+ * {@linkplain javax.management.Notification#setUserData userdata}
+ * of a {@linkplain javax.management.Notification notification}.
+ * The {@link #from from} method is provided to convert from
+ * a {@code CompositeData} to a {@code GarbageCollectionNotificationInfo}
+ * object. For example:
+ *
+ * <pre>
    + *      Notification notif;
    + *
    + *      // receive the notification emitted by a GarbageCollectorMXBean and set to notif
    + *      ...
    + *
    + *      String notifType = notif.getType();
    + *      if (notifType.equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
    + *          // retrieve the garbage collection notification information
    + *          CompositeData cd = (CompositeData) notif.getUserData();
    + *          GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from(cd);
    + *          ....
    + *      }
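+ *
+ *      // A minimal sketch of how such notifications are typically delivered:
+ *      // the emitting GarbageCollectorMXBean also implements NotificationEmitter,
+ *      // so a listener can be registered on it ("gcBean" and "listener" are
+ *      // illustrative names, not part of this API):
+ *      NotificationEmitter emitter = (NotificationEmitter) gcBean;
+ *      emitter.addNotificationListener(listener, null, null);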
+ * </pre>
+ *
+ * <p>
+ * The type of the notification emitted by a {@code GarbageCollectorMXBean} is:
+ * <ul>
+ *   <li>A {@linkplain #GARBAGE_COLLECTION_NOTIFICATION garbage collection notification}.
+ *       Used by every notification emitted by the garbage collector, the details about
+ *       the notification are provided in the {@linkplain #getGcAction action} String.</li>
+ * </ul>
    + **/ + +public class GarbageCollectionNotificationInfo implements CompositeDataView { + + private final String gcName; + private final String gcAction; + private final String gcCause; + private final GcInfo gcInfo; + private final CompositeData cdata; + + /** + * Notification type denoting that + * the Java virtual machine has completed a garbage collection cycle. + * This notification is emitted by a {@link GarbageCollectorMXBean}. + * The value of this notification type is + * {@code com.sun.management.gc.notification}. + */ + public static final String GARBAGE_COLLECTION_NOTIFICATION = + "com.sun.management.gc.notification"; + + /** + * Constructs a {@code GarbageCollectionNotificationInfo} object. + * + * @param gcName The name of the garbage collector used to perform the collection + * @param gcAction The name of the action performed by the garbage collector + * @param gcCause The cause the garbage collection action + * @param gcInfo a GcInfo object providing statistics about the GC cycle + */ + public GarbageCollectionNotificationInfo(String gcName, + String gcAction, + String gcCause, + GcInfo gcInfo) { + if (gcName == null) { + throw new NullPointerException("Null gcName"); + } + if (gcAction == null) { + throw new NullPointerException("Null gcAction"); + } + if (gcCause == null) { + throw new NullPointerException("Null gcCause"); + } + this.gcName = gcName; + this.gcAction = gcAction; + this.gcCause = gcCause; + this.gcInfo = gcInfo; + this.cdata = new GarbageCollectionNotifInfoCompositeData(this); + } + + GarbageCollectionNotificationInfo(CompositeData cd) { + GarbageCollectionNotifInfoCompositeData.validateCompositeData(cd); + + this.gcName = GarbageCollectionNotifInfoCompositeData.getGcName(cd); + this.gcAction = GarbageCollectionNotifInfoCompositeData.getGcAction(cd); + this.gcCause = GarbageCollectionNotifInfoCompositeData.getGcCause(cd); + this.gcInfo = GarbageCollectionNotifInfoCompositeData.getGcInfo(cd); + this.cdata = cd; + } + + /** + * Returns the name of the garbage collector used to perform the collection + * + * @return the name of the garbage collector used to perform the collection + */ + public String getGcName() { + return gcName; + } + + /** + * Returns the action of the performed by the garbage collector + * + * @return the the action of the performed by the garbage collector + */ + public String getGcAction() { + return gcAction; + } + + /** + * Returns the cause the garbage collection + * + * @return the the cause the garbage collection + */ + public String getGcCause() { + return gcCause; + } + + /** + * Returns the GC information related to the last garbage collection + * + * @return the GC information related to the + * last garbage collection + */ + public GcInfo getGcInfo() { + return gcInfo; + } + + /** + * Returns a {@code GarbageCollectionNotificationInfo} object represented by the + * given {@code CompositeData}. + * The given {@code CompositeData} must contain + * the following attributes: + *
+ * <blockquote>
+ * <table border>
+ * <tr><th>Attribute Name</th><th>Type</th></tr>
+ * <tr><td>gcName</td><td>{@code java.lang.String}</td></tr>
+ * <tr><td>gcAction</td><td>{@code java.lang.String}</td></tr>
+ * <tr><td>gcCause</td><td>{@code java.lang.String}</td></tr>
+ * <tr><td>gcInfo</td><td>{@code javax.management.openmbean.CompositeData}</td></tr>
+ * </table>
+ * </blockquote>
+ *
    + * + * @param cd {@code CompositeData} representing a + * {@code GarbageCollectionNotificationInfo} + * + * @throws IllegalArgumentException if {@code cd} does not + * represent a {@code GarbaageCollectionNotificationInfo} object. + * + * @return a {@code GarbageCollectionNotificationInfo} object represented + * by {@code cd} if {@code cd} is not {@code null}; + * {@code null} otherwise. + */ + public static GarbageCollectionNotificationInfo from(CompositeData cd) { + if (cd == null) { + return null; + } + + if (cd instanceof GarbageCollectionNotifInfoCompositeData) { + return ((GarbageCollectionNotifInfoCompositeData) cd).getGarbageCollectionNotifInfo(); + } else { + return new GarbageCollectionNotificationInfo(cd); + } + } + + public CompositeData toCompositeData(CompositeType ct) { + return cdata; + } + +} diff --git a/jdk/src/share/classes/com/sun/nio/sctp/MessageInfo.java b/jdk/src/share/classes/com/sun/nio/sctp/MessageInfo.java index ca3e13fc47a..b851b77ac43 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/MessageInfo.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/MessageInfo.java @@ -179,7 +179,7 @@ public abstract class MessageInfo { * completely received. For messages being sent {@code true} indicates that * the message is complete, {@code false} indicates that the message is not * complete. How the send channel interprets this value depends on the value - * of its {@link SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE + * of its {@link SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE * SCTP_EXPLICIT_COMPLETE} socket option. * * @return {@code true} if, and only if, the message is complete @@ -192,7 +192,7 @@ public abstract class MessageInfo { *

    For messages being sent {@code true} indicates that * the message is complete, {@code false} indicates that the message is not * complete. How the send channel interprets this value depends on the value - * of its {@link SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE + * of its {@link SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE * SCTP_EXPLICIT_COMPLETE} socket option. * * @param complete diff --git a/jdk/src/share/classes/com/sun/nio/sctp/SctpChannel.java b/jdk/src/share/classes/com/sun/nio/sctp/SctpChannel.java index 8e0472ef1fa..d040a7a07ea 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/SctpChannel.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/SctpChannel.java @@ -65,55 +65,55 @@ import java.nio.channels.SelectionKey; * Description * * - * {@link SctpStandardSocketOption#SCTP_DISABLE_FRAGMENTS + * {@link SctpStandardSocketOptions#SCTP_DISABLE_FRAGMENTS * SCTP_DISABLE_FRAGMENTS} * Enables or disables message fragmentation * * - * {@link SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE + * {@link SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE * SCTP_EXPLICIT_COMPLETE} * Enables or disables explicit message completion * * - * {@link SctpStandardSocketOption#SCTP_FRAGMENT_INTERLEAVE + * {@link SctpStandardSocketOptions#SCTP_FRAGMENT_INTERLEAVE * SCTP_FRAGMENT_INTERLEAVE} * Controls how the presentation of messages occur for the message * receiver * * - * {@link SctpStandardSocketOption#SCTP_INIT_MAXSTREAMS + * {@link SctpStandardSocketOptions#SCTP_INIT_MAXSTREAMS * SCTP_INIT_MAXSTREAMS} * The maximum number of streams requested by the local endpoint during * association initialization * * - * {@link SctpStandardSocketOption#SCTP_NODELAY SCTP_NODELAY} + * {@link SctpStandardSocketOptions#SCTP_NODELAY SCTP_NODELAY} * Enables or disable a Nagle-like algorithm * * - * {@link SctpStandardSocketOption#SCTP_PRIMARY_ADDR + * {@link SctpStandardSocketOptions#SCTP_PRIMARY_ADDR * SCTP_PRIMARY_ADDR} * Requests that the local SCTP stack use the given peer address as the * association primary * * - * {@link SctpStandardSocketOption#SCTP_SET_PEER_PRIMARY_ADDR + * {@link SctpStandardSocketOptions#SCTP_SET_PEER_PRIMARY_ADDR * SCTP_SET_PEER_PRIMARY_ADDR} * Requests that the peer mark the enclosed address as the association * primary * * - * {@link SctpStandardSocketOption#SO_SNDBUF + * {@link SctpStandardSocketOptions#SO_SNDBUF * SO_SNDBUF} * The size of the socket send buffer * * - * {@link SctpStandardSocketOption#SO_RCVBUF + * {@link SctpStandardSocketOptions#SO_RCVBUF * SO_RCVBUF} * The size of the socket receive buffer * * - * {@link SctpStandardSocketOption#SO_LINGER + * {@link SctpStandardSocketOptions#SO_LINGER * SO_LINGER} * Linger on close if data is present (when configured in blocking mode * only) @@ -449,7 +449,7 @@ public abstract class SctpChannel *

This is a convenience method and is equivalent to evaluating the * following expression: *

    -     * setOption(SctpStandardSocketOption.SCTP_INIT_MAXSTREAMS, SctpStandardSocketOption.InitMaxStreams.create(maxInStreams, maxOutStreams))
    +     * setOption(SctpStandardSocketOptions.SCTP_INIT_MAXSTREAMS, SctpStandardSocketOption.InitMaxStreams.create(maxInStreams, maxOutStreams))
          *  .connect(remote);
          * 
    * @@ -651,7 +651,7 @@ public abstract class SctpChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract T getOption(SctpSocketOption name) throws IOException; @@ -680,7 +680,7 @@ public abstract class SctpChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract SctpChannel setOption(SctpSocketOption name, T value) throws IOException; @@ -731,7 +731,7 @@ public abstract class SctpChannel * MessageInfo} will return {@code false}, and more invocations of this * method will be necessary to completely consume the messgae. Only * one message at a time will be partially delivered in any stream. The - * socket option {@link SctpStandardSocketOption#SCTP_FRAGMENT_INTERLEAVE + * socket option {@link SctpStandardSocketOptions#SCTP_FRAGMENT_INTERLEAVE * SCTP_FRAGMENT_INTERLEAVE} controls various aspects of what interlacing of * messages occurs. * @@ -804,7 +804,7 @@ public abstract class SctpChannel * and sufficient room becomes available, then the remaining bytes in the * given byte buffer are transmitted as a single message. Sending a message * is atomic unless explicit message completion {@link - * SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE} + * SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE} * socket option is enabled on this channel's socket. * *

    The message is transferred from the byte buffer as if by a regular diff --git a/jdk/src/share/classes/com/sun/nio/sctp/SctpMultiChannel.java b/jdk/src/share/classes/com/sun/nio/sctp/SctpMultiChannel.java index b2f0378064f..229d18ec039 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/SctpMultiChannel.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/SctpMultiChannel.java @@ -69,55 +69,55 @@ import java.nio.channels.SelectionKey; * Description * * - * {@link SctpStandardSocketOption#SCTP_DISABLE_FRAGMENTS + * {@link SctpStandardSocketOptions#SCTP_DISABLE_FRAGMENTS * SCTP_DISABLE_FRAGMENTS} * Enables or disables message fragmentation * * - * {@link SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE + * {@link SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE * SCTP_EXPLICIT_COMPLETE} * Enables or disables explicit message completion * * - * {@link SctpStandardSocketOption#SCTP_FRAGMENT_INTERLEAVE + * {@link SctpStandardSocketOptions#SCTP_FRAGMENT_INTERLEAVE * SCTP_FRAGMENT_INTERLEAVE} * Controls how the presentation of messages occur for the message * receiver * * - * {@link SctpStandardSocketOption#SCTP_INIT_MAXSTREAMS + * {@link SctpStandardSocketOptions#SCTP_INIT_MAXSTREAMS * SCTP_INIT_MAXSTREAMS} * The maximum number of streams requested by the local endpoint during * association initialization * * - * {@link SctpStandardSocketOption#SCTP_NODELAY SCTP_NODELAY} + * {@link SctpStandardSocketOptions#SCTP_NODELAY SCTP_NODELAY} * Enables or disable a Nagle-like algorithm * * - * {@link SctpStandardSocketOption#SCTP_PRIMARY_ADDR + * {@link SctpStandardSocketOptions#SCTP_PRIMARY_ADDR * SCTP_PRIMARY_ADDR} * Requests that the local SCTP stack use the given peer address as the * association primary * * - * {@link SctpStandardSocketOption#SCTP_SET_PEER_PRIMARY_ADDR + * {@link SctpStandardSocketOptions#SCTP_SET_PEER_PRIMARY_ADDR * SCTP_SET_PEER_PRIMARY_ADDR} * Requests that the peer mark the enclosed address as the association * primary * * - * {@link SctpStandardSocketOption#SO_SNDBUF + * {@link SctpStandardSocketOptions#SO_SNDBUF * SO_SNDBUF} * The size of the socket send buffer * * - * {@link SctpStandardSocketOption#SO_RCVBUF + * {@link SctpStandardSocketOptions#SO_RCVBUF * SO_RCVBUF} * The size of the socket receive buffer * * - * {@link SctpStandardSocketOption#SO_LINGER + * {@link SctpStandardSocketOptions#SO_LINGER * SO_LINGER} * Linger on close if data is present (when configured in blocking mode * only) @@ -450,7 +450,7 @@ public abstract class SctpMultiChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract T getOption(SctpSocketOption name, Association association) @@ -489,7 +489,7 @@ public abstract class SctpMultiChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract SctpMultiChannel setOption(SctpSocketOption name, T value, @@ -542,7 +542,7 @@ public abstract class SctpMultiChannel * MessageInfo} will return {@code false}, and more invocations of this * method will be necessary to completely consume the messgae. Only * one message at a time will be partially delivered in any stream. The - * socket option {@link SctpStandardSocketOption#SCTP_FRAGMENT_INTERLEAVE + * socket option {@link SctpStandardSocketOptions#SCTP_FRAGMENT_INTERLEAVE * SCTP_FRAGMENT_INTERLEAVE} controls various aspects of what interlacing of * messages occurs. 
* @@ -635,14 +635,14 @@ public abstract class SctpMultiChannel * underlying output buffer, then the remaining bytes in the given byte * buffer are transmitted as a single message. Sending a message * is atomic unless explicit message completion {@link - * SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE} + * SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE} * socket option is enabled on this channel's socket. * *

    If this channel is in non-blocking mode, there is sufficient room * in the underlying output buffer, and an implicit association setup is * required, then the remaining bytes in the given byte buffer are * transmitted as a single message, subject to {@link - * SctpStandardSocketOption#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE}. + * SctpStandardSocketOptions#SCTP_EXPLICIT_COMPLETE SCTP_EXPLICIT_COMPLETE}. * If for any reason the message cannot * be delivered an {@link AssociationChangeNotification association * changed} notification is put on the SCTP stack with its {@code event} parameter set diff --git a/jdk/src/share/classes/com/sun/nio/sctp/SctpServerChannel.java b/jdk/src/share/classes/com/sun/nio/sctp/SctpServerChannel.java index 5c762edfe35..3867fc9ca34 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/SctpServerChannel.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/SctpServerChannel.java @@ -53,7 +53,7 @@ import java.nio.channels.spi.AbstractSelectableChannel; * Description * * - * {@link SctpStandardSocketOption#SCTP_INIT_MAXSTREAMS + * {@link SctpStandardSocketOptions#SCTP_INIT_MAXSTREAMS * SCTP_INIT_MAXSTREAMS} * The maximum number of streams requested by the local endpoint during * association initialization @@ -360,7 +360,7 @@ public abstract class SctpServerChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract T getOption(SctpSocketOption name) throws IOException; @@ -388,7 +388,7 @@ public abstract class SctpServerChannel * @throws IOException * If an I/O error occurs * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public abstract SctpServerChannel setOption(SctpSocketOption name, T value) diff --git a/jdk/src/share/classes/com/sun/nio/sctp/SctpSocketOption.java b/jdk/src/share/classes/com/sun/nio/sctp/SctpSocketOption.java index 37f954d7120..3e60dbfa090 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/SctpSocketOption.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/SctpSocketOption.java @@ -33,6 +33,6 @@ import java.net.SocketOption; * * @since 1.7 * - * @see SctpStandardSocketOption + * @see SctpStandardSocketOptions */ public interface SctpSocketOption extends SocketOption { } diff --git a/jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOption.java b/jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOptions.java similarity index 97% rename from jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOption.java rename to jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOptions.java index 47ffa98238a..22c5f4a005b 100644 --- a/jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOption.java +++ b/jdk/src/share/classes/com/sun/nio/sctp/SctpStandardSocketOptions.java @@ -34,8 +34,8 @@ import sun.nio.ch.SctpStdSocketOption; * * @since 1.7 */ -public class SctpStandardSocketOption { - private SctpStandardSocketOption() {} +public class SctpStandardSocketOptions { + private SctpStandardSocketOptions() {} /** * Enables or disables message fragmentation. * @@ -127,7 +127,7 @@ public class SctpStandardSocketOption { * association initialization. * *

    The value of this socket option is an {@link - * SctpStandardSocketOption.InitMaxStreams InitMaxStreams}, that represents + * SctpStandardSocketOptions.InitMaxStreams InitMaxStreams}, that represents * the maximum number of inbound and outbound streams that an association * on the channel is prepared to support. * @@ -157,9 +157,9 @@ public class SctpStandardSocketOption { * the endpoints default value. */ public static final SctpSocketOption - SCTP_INIT_MAXSTREAMS = - new SctpStdSocketOption( - "SCTP_INIT_MAXSTREAMS", SctpStandardSocketOption.InitMaxStreams.class); + SCTP_INIT_MAXSTREAMS = + new SctpStdSocketOption( + "SCTP_INIT_MAXSTREAMS", SctpStandardSocketOptions.InitMaxStreams.class); /** * Enables or disables a Nagle-like algorithm. @@ -310,7 +310,7 @@ public class SctpStandardSocketOption { * This class is used to set the maximum number of inbound/outbound streams * used by the local endpoint during association initialization. An * instance of this class is used to set the {@link - * SctpStandardSocketOption#SCTP_INIT_MAXSTREAMS SCTP_INIT_MAXSTREAMS} + * SctpStandardSocketOptions#SCTP_INIT_MAXSTREAMS SCTP_INIT_MAXSTREAMS} * socket option. * * @since 1.7 diff --git a/jdk/src/share/classes/java/awt/Component.java b/jdk/src/share/classes/java/awt/Component.java index f577887abe6..2225633c0db 100644 --- a/jdk/src/share/classes/java/awt/Component.java +++ b/jdk/src/share/classes/java/awt/Component.java @@ -2887,11 +2887,12 @@ public abstract class Component implements ImageObserver, MenuContainer, /** * Invalidates this component and its ancestors. *

    - * All the ancestors of this component up to the nearest validate root are - * marked invalid also. If there is no a validate root container for this - * component, all of its ancestors up to the root of the hierarchy are - * marked invalid as well. Marking a container invalid indicates - * that the container needs to be laid out. + * By default, all the ancestors of the component up to the top-most + * container of the hierarchy are marked invalid. If the {@code + * java.awt.smartInvalidate} system property is set to {@code true}, + * invalidation stops on the nearest validate root of this component. + * Marking a container invalid indicates that the container needs to + * be laid out. *

    * This method is called automatically when any layout-related information * changes (e.g. setting the bounds of the component, or adding the diff --git a/jdk/src/share/classes/java/awt/Container.java b/jdk/src/share/classes/java/awt/Container.java index 88f0865b187..c59aa90bc5f 100644 --- a/jdk/src/share/classes/java/awt/Container.java +++ b/jdk/src/share/classes/java/awt/Container.java @@ -41,6 +41,8 @@ import java.io.ObjectStreamField; import java.io.PrintStream; import java.io.PrintWriter; +import java.security.AccessController; + import java.util.Arrays; import java.util.EventListener; import java.util.HashSet; @@ -60,6 +62,8 @@ import sun.awt.dnd.SunDropTargetEvent; import sun.java2d.pipe.Region; +import sun.security.action.GetBooleanAction; + /** * A generic Abstract Window Toolkit(AWT) container object is a component * that can contain other AWT components. @@ -1506,12 +1510,18 @@ public class Container extends Component { * Layout-related changes, such as bounds of the validate root descendants, * do not affect the layout of the validate root parent. This peculiarity * enables the {@code invalidate()} method to stop invalidating the - * component hierarchy when the method encounters a validate root. + * component hierarchy when the method encounters a validate root. However, + * to preserve backward compatibility this new optimized behavior is + * enabled only when the {@code java.awt.smartInvalidate} system property + * value is set to {@code true}. *

    - * If a component hierarchy contains validate roots, the {@code validate()} - * method must be invoked on the validate root of a previously invalidated - * component, rather than on the top-level container (such as a {@code - * Frame} object) to restore the validity of the hierarchy later. + * If a component hierarchy contains validate roots and the new optimized + * {@code invalidate()} behavior is enabled, the {@code validate()} method + * must be invoked on the validate root of a previously invalidated + * component to restore the validity of the hierarchy later. Otherwise, + * calling the {@code validate()} method on the top-level container (such + * as a {@code Frame} object) should be used to restore the validity of the + * component hierarchy. *

    * The {@code Window} class and the {@code Applet} class are the validate * roots in AWT. Swing introduces more validate roots. @@ -1527,13 +1537,20 @@ public class Container extends Component { return false; } + private static final boolean isJavaAwtSmartInvalidate; + static { + // Don't lazy-read because every app uses invalidate() + isJavaAwtSmartInvalidate = AccessController.doPrivileged( + new GetBooleanAction("java.awt.smartInvalidate")); + } + /** * Invalidates the parent of the container unless the container * is a validate root. */ @Override void invalidateParent() { - if (!isValidateRoot()) { + if (!isJavaAwtSmartInvalidate || !isValidateRoot()) { super.invalidateParent(); } } @@ -1572,9 +1589,8 @@ public class Container extends Component { * automatically. Note that the ancestors of the container may be * invalidated also (see {@link Component#invalidate} for details.) * Therefore, to restore the validity of the hierarchy, the {@code - * validate()} method should be invoked on a validate root of an - * invalidated component, or on the top-most container if the hierarchy - * does not contain validate roots. + * validate()} method should be invoked on the top-most invalid + * container of the hierarchy. *
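For reference, a minimal sketch (not part of this changeset) of the opt-in behavior described above. It assumes a Swing hierarchy, where the frame's JRootPane is a validate root; the class name is illustrative, and because the patch reads the property once in a static initializer, the property must be set before any AWT class loads (passing -Djava.awt.smartInvalidate=true on the command line is the safer route).

    import javax.swing.JButton;
    import javax.swing.JFrame;
    import javax.swing.JPanel;
    import javax.swing.SwingUtilities;

    public class SmartInvalidateSketch {
        public static void main(String[] args) {
            // Must happen before java.awt.Container is initialized.
            System.setProperty("java.awt.smartInvalidate", "true");

            SwingUtilities.invokeLater(new Runnable() {
                public void run() {
                    JFrame frame = new JFrame("smartInvalidate sketch");
                    JPanel panel = new JPanel();
                    JButton button = new JButton("Short");
                    panel.add(button);
                    frame.add(panel);
                    frame.pack();
                    frame.setVisible(true);

                    // A layout-related change invalidates the button and its
                    // ancestors; with smartInvalidate the marking stops at the
                    // nearest validate root (the JRootPane) instead of climbing
                    // all the way to the JFrame.
                    button.setText("A much longer label");

                    // Restore validity from the validate root, as the updated
                    // Container javadoc recommends when the optimization is on.
                    button.revalidate();
                }
            });
        }
    }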

    * Validating the container may be a quite time-consuming operation. For * performance reasons a developer may postpone the validation of the diff --git a/jdk/src/share/classes/java/awt/Toolkit.java b/jdk/src/share/classes/java/awt/Toolkit.java index 6e00e6afdbf..3ac2714dcef 100644 --- a/jdk/src/share/classes/java/awt/Toolkit.java +++ b/jdk/src/share/classes/java/awt/Toolkit.java @@ -466,10 +466,7 @@ public abstract class Toolkit { */ protected void loadSystemColors(int[] systemColors) throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } - + GraphicsEnvironment.checkHeadless(); } /** @@ -504,10 +501,7 @@ public abstract class Toolkit { */ public void setDynamicLayout(boolean dynamic) throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } - + GraphicsEnvironment.checkHeadless(); } /** @@ -531,9 +525,8 @@ public abstract class Toolkit { */ protected boolean isDynamicLayoutSet() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit().isDynamicLayoutSet(); } else { @@ -569,9 +562,8 @@ public abstract class Toolkit { */ public boolean isDynamicLayoutActive() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit().isDynamicLayoutActive(); } else { @@ -615,9 +607,7 @@ public abstract class Toolkit { */ public Insets getScreenInsets(GraphicsConfiguration gc) throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit().getScreenInsets(gc); } else { @@ -1200,10 +1190,7 @@ public abstract class Toolkit { * security manager's checkPermission method with a * RuntimePermission("queuePrintJob") permission. * - * @param frame the parent of the print dialog. May be null if and only - * if jobAttributes is not null and jobAttributes.getDialog() - * returns JobAttributes.DialogType.NONE or - * JobAttributes.DialogType.COMMON. + * @param frame the parent of the print dialog. May not be null. * @param jobtitle the title of the PrintJob. A null title is equivalent * to "". * @param jobAttributes a set of job attributes which will control the @@ -1359,9 +1346,8 @@ public abstract class Toolkit { * @since 1.4 */ public Clipboard getSystemSelection() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit().getSystemSelection(); } else { @@ -1391,9 +1377,7 @@ public abstract class Toolkit { * @since JDK1.1 */ public int getMenuShortcutKeyMask() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); return Event.CTRL_MASK; } @@ -1418,7 +1402,10 @@ public abstract class Toolkit { * @since 1.3 */ public boolean getLockingKeyState(int keyCode) - throws UnsupportedOperationException { + throws UnsupportedOperationException + { + GraphicsEnvironment.checkHeadless(); + if (! 
(keyCode == KeyEvent.VK_CAPS_LOCK || keyCode == KeyEvent.VK_NUM_LOCK || keyCode == KeyEvent.VK_SCROLL_LOCK || keyCode == KeyEvent.VK_KANA_LOCK)) { throw new IllegalArgumentException("invalid key for Toolkit.getLockingKeyState"); @@ -1449,7 +1436,10 @@ public abstract class Toolkit { * @since 1.3 */ public void setLockingKeyState(int keyCode, boolean on) - throws UnsupportedOperationException { + throws UnsupportedOperationException + { + GraphicsEnvironment.checkHeadless(); + if (! (keyCode == KeyEvent.VK_CAPS_LOCK || keyCode == KeyEvent.VK_NUM_LOCK || keyCode == KeyEvent.VK_SCROLL_LOCK || keyCode == KeyEvent.VK_KANA_LOCK)) { throw new IllegalArgumentException("invalid key for Toolkit.setLockingKeyState"); @@ -1523,9 +1513,8 @@ public abstract class Toolkit { */ public Dimension getBestCursorSize(int preferredWidth, int preferredHeight) throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + // Override to implement custom cursor support. if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit(). @@ -1553,9 +1542,8 @@ public abstract class Toolkit { * @since 1.2 */ public int getMaximumCursorColors() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + // Override to implement custom cursor support. if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit().getMaximumCursorColors(); @@ -1605,9 +1593,8 @@ public abstract class Toolkit { public boolean isFrameStateSupported(int state) throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + if (this != Toolkit.getDefaultToolkit()) { return Toolkit.getDefaultToolkit(). isFrameStateSupported(state); @@ -2614,9 +2601,8 @@ public abstract class Toolkit { * @since 1.7 */ public boolean areExtraMouseButtonsEnabled() throws HeadlessException { - if (GraphicsEnvironment.isHeadless()){ - throw new HeadlessException(); - } + GraphicsEnvironment.checkHeadless(); + return Toolkit.getDefaultToolkit().areExtraMouseButtonsEnabled(); } } diff --git a/jdk/src/share/classes/java/lang/SafeVarargs.java b/jdk/src/share/classes/java/lang/SafeVarargs.java index b0c03a9162b..818ea21bafb 100644 --- a/jdk/src/share/classes/java/lang/SafeVarargs.java +++ b/jdk/src/share/classes/java/lang/SafeVarargs.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ import java.lang.annotation.*; * constructor does not perform potentially unsafe operations on its * varargs parameter. Applying this annotation to a method or * constructor suppresses unchecked warnings about a - * non-reifiable variable-arity (vararg) type and suppresses + * non-reifiable variable arity (vararg) type and suppresses * unchecked warnings about parameterized array creation at call * sites. * @@ -41,11 +41,10 @@ import java.lang.annotation.*; * additional usage restrictions on this annotation type; it is a * compile-time error if a method or constructor declaration is * annotated with a {@code @SafeVarargs} annotation, and either: - *

      - *
    • the declaration is a fixed-arity method or constructor + *
    • the declaration is a fixed arity method or constructor * - *
    • the declaration is a variable-arity method that is neither + *
    • the declaration is a variable arity method that is neither * {@code static} nor {@code final}. * *
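For reference, a declaration that avoids the compile-time errors listed above and uses the annotation as intended; this sketch is not part of this changeset, and the class and method names are purely illustrative. The varargs array is only read and never exposed, so the warnings suppressed at call sites are harmless, and the method is static, satisfying the restriction on variable arity declarations.

    import java.util.ArrayList;
    import java.util.List;

    public class SafeVarargsSketch {
        // Safe: the elements array is never written to or aliased.
        @SafeVarargs
        static <T> List<T> listOf(T... elements) {
            List<T> result = new ArrayList<T>();
            for (T element : elements) {
                result.add(element);
            }
            return result;
        }

        public static void main(String[] args) {
            List<String> names = listOf("duke", "tux");  // String is reifiable
            // Without @SafeVarargs the next call would emit an
            // "unchecked generic array creation" warning, because
            // List<String> is a non-reifiable element type.
            List<List<String>> lists = listOf(names, names);
            System.out.println(lists.size() + " lists of " + names.size() + " names");
        }
    }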
    @@ -55,15 +54,28 @@ import java.lang.annotation.*; * *
      * - *
    • The variable-arity parameter has a reifiable element type, + *
    • The variable arity parameter has a reifiable element type, * which includes primitive types, {@code Object}, and {@code String}. * (The unchecked warnings this annotation type suppresses already do * not occur for a reifiable element type.) * *
    • The body of the method or constructor declaration performs * potentially unsafe operations, such as an assignment to an element - * of the variable-arity parameter's array that generates an unchecked - * warning. + * of the variable arity parameter's array that generates an unchecked + * warning. Some unsafe operations do not trigger an unchecked + * warning. For example, the aliasing in + * + *
      + * @SafeVarargs // Not actually safe!
      + * static void m(List<String>... stringLists) {
      + *   Object[] array = stringLists;
      + *   List<Integer> tmpList = Arrays.asList(42);
      + *   array[0] = tmpList; // Semantically invalid, but compiles without warnings
      + *   String s = stringLists[0].get(0); // Oh no, ClassCastException at runtime!
      + * }
      + * 
      + * + * leads to a {@code ClassCastException} at runtime. * *

      Future versions of the platform may mandate compiler errors for * such unsafe operations. diff --git a/jdk/src/share/classes/java/lang/Throwable.java b/jdk/src/share/classes/java/lang/Throwable.java index d70fe26841a..2319aafd00f 100644 --- a/jdk/src/share/classes/java/lang/Throwable.java +++ b/jdk/src/share/classes/java/lang/Throwable.java @@ -336,7 +336,10 @@ public class Throwable implements Serializable { * Disabling of suppression should only occur in exceptional * circumstances where special requirements exist, such as a * virtual machine reusing exception objects under low-memory - * situations. + * situations. Circumstances where a given exception object is + * repeatedly caught and rethrown, such as to implement control + * flow between two sub-systems, is another situation where + * immutable throwable objects would be appropriate. * * @param message the detail message. * @param cause the cause. (A {@code null} value is permitted, @@ -423,6 +426,18 @@ public class Throwable implements Serializable { * {@link #Throwable(String,Throwable)}, this method cannot be called * even once. * + *

      An example of using this method on a legacy throwable type + * without other support for setting the cause is: + * + *

      +     * try {
      +     *     lowLevelOp();
      +     * } catch (LowLevelException le) {
      +     *     throw (HighLevelException)
      +     *           new HighLevelException().initCause(le); // Legacy constructor
      +     * }
      +     * 
      + * * @param cause the cause (which is saved for later retrieval by the * {@link #getCause()} method). (A {@code null} value is * permitted, and indicates that the cause is nonexistent or @@ -762,7 +777,8 @@ public class Throwable implements Serializable { * @see java.lang.Throwable#printStackTrace() */ public synchronized Throwable fillInStackTrace() { - if (stackTrace != null) { + if (stackTrace != null || + backtrace != null /* Out of protocol state */ ) { fillInStackTrace(0); stackTrace = UNASSIGNED_STACK; } @@ -788,7 +804,8 @@ public class Throwable implements Serializable { * this throwable is permitted to return a zero-length array from this * method. Generally speaking, the array returned by this method will * contain one element for every frame that would be printed by - * {@code printStackTrace}. + * {@code printStackTrace}. Writes to the returned array do not + * affect future calls to this method. * * @return an array of stack trace elements representing the stack trace * pertaining to this throwable. @@ -801,7 +818,8 @@ public class Throwable implements Serializable { private synchronized StackTraceElement[] getOurStackTrace() { // Initialize stack trace field with information from // backtrace if this is the first call to this method - if (stackTrace == UNASSIGNED_STACK) { + if (stackTrace == UNASSIGNED_STACK || + (stackTrace == null && backtrace != null) /* Out of protocol state */) { int depth = getStackTraceDepth(); stackTrace = new StackTraceElement[depth]; for (int i=0; i < depth; i++) @@ -849,7 +867,8 @@ public class Throwable implements Serializable { } synchronized (this) { - if (this.stackTrace == null) // Immutable stack + if (this.stackTrace == null && // Immutable stack + backtrace == null) // Test for out of protocol state return; this.stackTrace = defensiveCopy; } @@ -971,8 +990,8 @@ public class Throwable implements Serializable { /** * Appends the specified exception to the exceptions that were * suppressed in order to deliver this exception. This method is - * typically called (automatically and implicitly) by the {@code - * try}-with-resources statement. + * thread-safe and typically called (automatically and implicitly) + * by the {@code try}-with-resources statement. * *
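For reference, a minimal sketch (not part of this changeset, with an illustrative FlakyResource class) of the machinery the updated sentence refers to: the try-with-resources statement calls addSuppressed implicitly when close() fails after the body has already thrown, and getSuppressed() exposes the recorded failure.

    public class SuppressionSketch {
        static class FlakyResource implements AutoCloseable {
            @Override
            public void close() {
                throw new IllegalStateException("close failed");
            }
        }

        public static void main(String[] args) {
            try (FlakyResource resource = new FlakyResource()) {
                throw new RuntimeException("body failed");
            } catch (RuntimeException primary) {
                // The close() failure was attached to the primary exception by
                // the compiler-generated call to addSuppressed.
                for (Throwable suppressed : primary.getSuppressed()) {
                    System.out.println("suppressed: " + suppressed);
                }
            }
        }
    }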

      The suppression behavior is enabled unless disabled * {@linkplain #Throwable(String, Throwable, boolean, boolean) via @@ -1043,7 +1062,9 @@ public class Throwable implements Serializable { * * If no exceptions were suppressed or {@linkplain * #Throwable(String, Throwable, boolean, boolean) suppression is - * disabled}, an empty array is returned. + * disabled}, an empty array is returned. This method is + * thread-safe. Writes to the returned array do not affect future + * calls to this method. * * @return an array containing all of the exceptions that were * suppressed to deliver this exception. diff --git a/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java b/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java index a47b38f8bb3..a77c904eb0e 100644 --- a/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java +++ b/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java @@ -27,6 +27,7 @@ package java.lang.invoke; import sun.invoke.util.VerifyType; import sun.invoke.util.Wrapper; +import sun.invoke.util.ValueConversions; import java.util.Arrays; import static java.lang.invoke.MethodHandleNatives.Constants.*; import static java.lang.invoke.MethodHandleStatics.*; @@ -55,29 +56,35 @@ class AdapterMethodHandle extends BoundMethodHandle { this(target, newType, conv, null); } + int getConversion() { return conversion; } + // TO DO: When adapting another MH with a null conversion, clone // the target and change its type, instead of adding another layer. /** Can a JVM-level adapter directly implement the proposed * argument conversions, as if by MethodHandles.convertArguments? */ - static boolean canPairwiseConvert(MethodType newType, MethodType oldType) { + static boolean canPairwiseConvert(MethodType newType, MethodType oldType, int level) { // same number of args, of course int len = newType.parameterCount(); if (len != oldType.parameterCount()) return false; - // Check return type. (Not much can be done with it.) + // Check return type. Class exp = newType.returnType(); Class ret = oldType.returnType(); - if (!VerifyType.isNullConversion(ret, exp)) - return false; + if (!VerifyType.isNullConversion(ret, exp)) { + if (!convOpSupported(OP_COLLECT_ARGS)) + return false; + if (!canConvertArgument(ret, exp, level)) + return false; + } // Check args pairwise. for (int i = 0; i < len; i++) { Class src = newType.parameterType(i); // source type Class dst = oldType.parameterType(i); // destination type - if (!canConvertArgument(src, dst)) + if (!canConvertArgument(src, dst, level)) return false; } @@ -87,11 +94,14 @@ class AdapterMethodHandle extends BoundMethodHandle { /** Can a JVM-level adapter directly implement the proposed * argument conversion, as if by MethodHandles.convertArguments? */ - static boolean canConvertArgument(Class src, Class dst) { + static boolean canConvertArgument(Class src, Class dst, int level) { // ? Retool this logic to use RETYPE_ONLY, CHECK_CAST, etc., as opcodes, // so we don't need to repeat so much decision making. if (VerifyType.isNullConversion(src, dst)) { return true; + } else if (convOpSupported(OP_COLLECT_ARGS)) { + // If we can build filters, we can convert anything to anything. 
+ return true; } else if (src.isPrimitive()) { if (dst.isPrimitive()) return canPrimCast(src, dst); @@ -99,7 +109,7 @@ class AdapterMethodHandle extends BoundMethodHandle { return canBoxArgument(src, dst); } else { if (dst.isPrimitive()) - return canUnboxArgument(src, dst); + return canUnboxArgument(src, dst, level); else return true; // any two refs can be interconverted } @@ -109,21 +119,20 @@ class AdapterMethodHandle extends BoundMethodHandle { * Create a JVM-level adapter method handle to conform the given method * handle to the similar newType, using only pairwise argument conversions. * For each argument, convert incoming argument to the exact type needed. - * Only null conversions are allowed on the return value (until - * the JVM supports ricochet adapters). - * The argument conversions allowed are casting, unboxing, + * The argument conversions allowed are casting, boxing and unboxing, * integral widening or narrowing, and floating point widening or narrowing. * @param newType required call type * @param target original method handle + * @param level which strength of conversion is allowed * @return an adapter to the original handle with the desired new type, * or the original target if the types are already identical * or null if the adaptation cannot be made */ - static MethodHandle makePairwiseConvert(MethodType newType, MethodHandle target) { + static MethodHandle makePairwiseConvert(MethodType newType, MethodHandle target, int level) { MethodType oldType = target.type(); if (newType == oldType) return target; - if (!canPairwiseConvert(newType, oldType)) + if (!canPairwiseConvert(newType, oldType, level)) return null; // (after this point, it is an assertion error to fail to convert) @@ -138,9 +147,14 @@ class AdapterMethodHandle extends BoundMethodHandle { break; } } + + Class needReturn = newType.returnType(); + Class haveReturn = oldType.returnType(); + boolean retConv = !VerifyType.isNullConversion(haveReturn, needReturn); + // Now build a chain of one or more adapters. - MethodHandle adapter = target; - MethodType midType = oldType.changeReturnType(newType.returnType()); + MethodHandle adapter = target, adapter2; + MethodType midType = oldType; for (int i = 0; i <= lastConv; i++) { Class src = newType.parameterType(i); // source type Class dst = midType.parameterType(i); // destination type @@ -149,22 +163,23 @@ class AdapterMethodHandle extends BoundMethodHandle { continue; } // Work the current type backward toward the desired caller type: - if (i != lastConv) { - midType = midType.changeParameterType(i, src); - } else { + midType = midType.changeParameterType(i, src); + if (i == lastConv) { // When doing the last (or only) real conversion, // force all remaining null conversions to happen also. - assert(VerifyType.isNullConversion(newType, midType.changeParameterType(i, src))); - midType = newType; + MethodType lastMidType = newType; + if (retConv) lastMidType = lastMidType.changeReturnType(haveReturn); + assert(VerifyType.isNullConversion(lastMidType, midType)); + midType = lastMidType; } // Tricky case analysis follows. // It parallels canConvertArgument() above. 
if (src.isPrimitive()) { if (dst.isPrimitive()) { - adapter = makePrimCast(midType, adapter, i, dst); + adapter2 = makePrimCast(midType, adapter, i, dst); } else { - adapter = makeBoxArgument(midType, adapter, i, dst); + adapter2 = makeBoxArgument(midType, adapter, i, src); } } else { if (dst.isPrimitive()) { @@ -174,29 +189,53 @@ class AdapterMethodHandle extends BoundMethodHandle { // conversions supported by reflect.Method.invoke. // Those conversions require a big nest of if/then/else logic, // which we prefer to make a user responsibility. - adapter = makeUnboxArgument(midType, adapter, i, dst); + adapter2 = makeUnboxArgument(midType, adapter, i, dst, level); } else { // Simple reference conversion. // Note: Do not check for a class hierarchy relation // between src and dst. In all cases a 'null' argument // will pass the cast conversion. - adapter = makeCheckCast(midType, adapter, i, dst); + adapter2 = makeCheckCast(midType, adapter, i, dst); } } - assert(adapter != null); - assert(adapter.type() == midType); + assert(adapter2 != null) : Arrays.asList(src, dst, midType, adapter, i, target, newType); + assert(adapter2.type() == midType); + adapter = adapter2; + } + if (retConv) { + adapter2 = makeReturnConversion(adapter, haveReturn, needReturn); + assert(adapter2 != null); + adapter = adapter2; } if (adapter.type() != newType) { // Only trivial conversions remain. - adapter = makeRetypeOnly(newType, adapter); - assert(adapter != null); + adapter2 = makeRetypeOnly(newType, adapter); + assert(adapter2 != null); + adapter = adapter2; // Actually, that's because there were no non-trivial ones: - assert(lastConv == -1); + assert(lastConv == -1 || retConv); } assert(adapter.type() == newType); return adapter; } + private static MethodHandle makeReturnConversion(MethodHandle target, Class haveReturn, Class needReturn) { + MethodHandle adjustReturn; + if (haveReturn == void.class) { + // synthesize a zero value for the given void + Object zero = Wrapper.forBasicType(needReturn).zero(); + adjustReturn = MethodHandles.constant(needReturn, zero); + } else { + MethodType needConversion = MethodType.methodType(needReturn, haveReturn); + adjustReturn = MethodHandles.identity(needReturn).asType(needConversion); + } + if (!canCollectArguments(adjustReturn.type(), target.type(), 0, false)) { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this code is deprecated + throw new InternalError("NYI"); + } + return makeCollectArguments(adjustReturn, target, 0, false); + } + /** * Create a JVM-level adapter method handle to permute the arguments * of the given method. 
@@ -224,7 +263,7 @@ class AdapterMethodHandle extends BoundMethodHandle { if (argumentMap.length != oldType.parameterCount()) throw newIllegalArgumentException("bad permutation: "+Arrays.toString(argumentMap)); if (nullPermutation) { - MethodHandle res = makePairwiseConvert(newType, target); + MethodHandle res = makePairwiseConvert(newType, target, 0); // well, that was easy if (res == null) throw newIllegalArgumentException("cannot convert pairwise: "+newType); @@ -310,11 +349,25 @@ class AdapterMethodHandle extends BoundMethodHandle { return (spChange & CONV_STACK_MOVE_MASK) << CONV_STACK_MOVE_SHIFT; } + static int extractStackMove(int convOp) { + int spChange = convOp >> CONV_STACK_MOVE_SHIFT; + return spChange / MethodHandleNatives.JVM_STACK_MOVE_UNIT; + } + + static int extractStackMove(MethodHandle target) { + if (target instanceof AdapterMethodHandle) { + AdapterMethodHandle amh = (AdapterMethodHandle) target; + return extractStackMove(amh.getConversion()); + } else { + return 0; + } + } + /** Construct an adapter conversion descriptor for a single-argument conversion. */ private static long makeConv(int convOp, int argnum, int src, int dest) { - assert(src == (src & 0xF)); - assert(dest == (dest & 0xF)); - assert(convOp >= OP_CHECK_CAST && convOp <= OP_PRIM_TO_REF); + assert(src == (src & CONV_TYPE_MASK)); + assert(dest == (dest & CONV_TYPE_MASK)); + assert(convOp >= OP_CHECK_CAST && convOp <= OP_PRIM_TO_REF || convOp == OP_COLLECT_ARGS); int stackMove = type2size(dest) - type2size(src); return ((long) argnum << 32 | (long) convOp << CONV_OP_SHIFT | @@ -323,11 +376,10 @@ class AdapterMethodHandle extends BoundMethodHandle { insertStackMove(stackMove) ); } - private static long makeConv(int convOp, int argnum, int stackMove) { - assert(convOp >= OP_DUP_ARGS && convOp <= OP_SPREAD_ARGS); + private static long makeDupConv(int convOp, int argnum, int stackMove) { + // simple argument motion, requiring one slot to specify + assert(convOp == OP_DUP_ARGS || convOp == OP_DROP_ARGS); byte src = 0, dest = 0; - if (convOp >= OP_COLLECT_ARGS && convOp <= OP_SPREAD_ARGS) - src = dest = T_OBJECT; return ((long) argnum << 32 | (long) convOp << CONV_OP_SHIFT | (int) src << CONV_SRC_TYPE_SHIFT | @@ -336,7 +388,8 @@ class AdapterMethodHandle extends BoundMethodHandle { ); } private static long makeSwapConv(int convOp, int srcArg, byte type, int destSlot) { - assert(convOp >= OP_SWAP_ARGS && convOp <= OP_ROT_ARGS); + // more complex argument motion, requiring two slots to specify + assert(convOp == OP_SWAP_ARGS || convOp == OP_ROT_ARGS); return ((long) srcArg << 32 | (long) convOp << CONV_OP_SHIFT | (int) type << CONV_SRC_TYPE_SHIFT | @@ -344,6 +397,18 @@ class AdapterMethodHandle extends BoundMethodHandle { (int) destSlot << CONV_VMINFO_SHIFT ); } + private static long makeSpreadConv(int convOp, int argnum, int src, int dest, int stackMove) { + // spreading or collecting, at a particular slot location + assert(convOp == OP_SPREAD_ARGS || convOp == OP_COLLECT_ARGS || convOp == OP_FOLD_ARGS); + // src = spread ? T_OBJECT (for array) : common type of collected args (else void) + // dest = spread ? 
element type of array : result type of collector (can be void) + return ((long) argnum << 32 | + (long) convOp << CONV_OP_SHIFT | + (int) src << CONV_SRC_TYPE_SHIFT | + (int) dest << CONV_DEST_TYPE_SHIFT | + insertStackMove(stackMove) + ); + } private static long makeConv(int convOp) { assert(convOp == OP_RETYPE_ONLY || convOp == OP_RETYPE_RAW); return ((long)-1 << 32) | (convOp << CONV_OP_SHIFT); // stackMove, src, dst all zero @@ -570,14 +635,10 @@ class AdapterMethodHandle extends BoundMethodHandle { static boolean canPrimCast(Class src, Class dst) { if (src == dst || !src.isPrimitive() || !dst.isPrimitive()) { return false; - } else if (Wrapper.forPrimitiveType(dst).isFloating()) { - // both must be floating types - return Wrapper.forPrimitiveType(src).isFloating(); } else { - // both are integral, and all combinations work fine - assert(Wrapper.forPrimitiveType(src).isIntegral() && - Wrapper.forPrimitiveType(dst).isIntegral()); - return true; + boolean sflt = Wrapper.forPrimitiveType(src).isFloating(); + boolean dflt = Wrapper.forPrimitiveType(dst).isFloating(); + return !(sflt | dflt); // no float support at present } } @@ -589,6 +650,29 @@ class AdapterMethodHandle extends BoundMethodHandle { */ static MethodHandle makePrimCast(MethodType newType, MethodHandle target, int arg, Class convType) { + Class src = newType.parameterType(arg); + if (canPrimCast(src, convType)) + return makePrimCastOnly(newType, target, arg, convType); + Class dst = convType; + boolean sflt = Wrapper.forPrimitiveType(src).isFloating(); + boolean dflt = Wrapper.forPrimitiveType(dst).isFloating(); + if (sflt | dflt) { + MethodHandle convMethod; + if (sflt) + convMethod = ((src == double.class) + ? ValueConversions.convertFromDouble(dst) + : ValueConversions.convertFromFloat(dst)); + else + convMethod = ((dst == double.class) + ? ValueConversions.convertToDouble(src) + : ValueConversions.convertToFloat(src)); + long conv = makeConv(OP_COLLECT_ARGS, arg, basicType(src), basicType(dst)); + return new AdapterMethodHandle(target, newType, conv, convMethod); + } + throw new InternalError("makePrimCast"); + } + static MethodHandle makePrimCastOnly(MethodType newType, MethodHandle target, + int arg, Class convType) { MethodType oldType = target.type(); if (!canPrimCast(newType, oldType, arg, convType)) return null; @@ -602,7 +686,7 @@ class AdapterMethodHandle extends BoundMethodHandle { * The convType is the unboxed type; it can be either a primitive or wrapper. */ static boolean canUnboxArgument(MethodType newType, MethodType targetType, - int arg, Class convType) { + int arg, Class convType, int level) { if (!convOpSupported(OP_REF_TO_PRIM)) return false; Class src = newType.parameterType(arg); Class dst = targetType.parameterType(arg); @@ -616,21 +700,31 @@ class AdapterMethodHandle extends BoundMethodHandle { return (diff == arg+1); // arg is sole non-trivial diff } /** Can an primitive unboxing adapter validly convert src to dst? */ - static boolean canUnboxArgument(Class src, Class dst) { - return (!src.isPrimitive() && Wrapper.asPrimitiveType(dst).isPrimitive()); + static boolean canUnboxArgument(Class src, Class dst, int level) { + assert(dst.isPrimitive()); + // if we have JVM support for boxing, we can also do complex unboxing + if (convOpSupported(OP_PRIM_TO_REF)) return true; + Wrapper dw = Wrapper.forPrimitiveType(dst); + // Level 0 means cast and unbox. This works on any reference. 
+ if (level == 0) return !src.isPrimitive(); + assert(level >= 0 && level <= 2); + // Levels 1 and 2 allow widening and/or narrowing conversions. + // These are not supported directly by the JVM. + // But if the input reference is monomorphic, we can do it. + return dw.wrapperType() == src; } /** Factory method: Unbox the given argument. * Return null if this cannot be done. */ static MethodHandle makeUnboxArgument(MethodType newType, MethodHandle target, - int arg, Class convType) { + int arg, Class convType, int level) { MethodType oldType = target.type(); Class src = newType.parameterType(arg); Class dst = oldType.parameterType(arg); Class boxType = Wrapper.asWrapperType(convType); Class primType = Wrapper.asPrimitiveType(convType); - if (!canUnboxArgument(newType, oldType, arg, convType)) + if (!canUnboxArgument(newType, oldType, arg, convType, level)) return null; MethodType castDone = newType; if (!VerifyType.isNullConversion(src, boxType)) @@ -642,19 +736,46 @@ class AdapterMethodHandle extends BoundMethodHandle { return makeCheckCast(newType, adapter, arg, boxType); } + /** Can a boxing conversion validly convert src to dst? */ + static boolean canBoxArgument(MethodType newType, MethodType targetType, + int arg, Class convType) { + if (!convOpSupported(OP_PRIM_TO_REF)) return false; + Class src = newType.parameterType(arg); + Class dst = targetType.parameterType(arg); + Class boxType = Wrapper.asWrapperType(convType); + convType = Wrapper.asPrimitiveType(convType); + if (!canCheckCast(boxType, dst) + || boxType == convType + || !VerifyType.isNullConversion(src, convType)) + return false; + int diff = diffTypes(newType, targetType, false); + return (diff == arg+1); // arg is sole non-trivial diff + } + /** Can an primitive boxing adapter validly convert src to dst? */ static boolean canBoxArgument(Class src, Class dst) { if (!convOpSupported(OP_PRIM_TO_REF)) return false; - throw new UnsupportedOperationException("NYI"); + return (src.isPrimitive() && !dst.isPrimitive()); } - /** Factory method: Unbox the given argument. + /** Factory method: Box the given argument. * Return null if this cannot be done. */ static MethodHandle makeBoxArgument(MethodType newType, MethodHandle target, int arg, Class convType) { - // this is difficult to do in the JVM because it must GC - return null; + MethodType oldType = target.type(); + Class src = newType.parameterType(arg); + Class dst = oldType.parameterType(arg); + Class boxType = Wrapper.asWrapperType(convType); + Class primType = Wrapper.asPrimitiveType(convType); + if (!canBoxArgument(newType, oldType, arg, convType)) { + return null; + } + if (!VerifyType.isNullConversion(boxType, dst)) + target = makeCheckCast(oldType.changeParameterType(arg, boxType), target, arg, dst); + MethodHandle boxerMethod = ValueConversions.box(Wrapper.forPrimitiveType(primType)); + long conv = makeConv(OP_PRIM_TO_REF, arg, basicType(primType), T_OBJECT); + return new AdapterMethodHandle(target, newType, conv, boxerMethod); } /** Can an adapter simply drop arguments to convert the target to newType? 
*/ @@ -699,7 +820,7 @@ class AdapterMethodHandle extends BoundMethodHandle { int slotCount = keep1InSlot - dropSlot; assert(slotCount >= dropArgCount); assert(target.type().parameterSlotCount() + slotCount == newType.parameterSlotCount()); - long conv = makeConv(OP_DROP_ARGS, dropArgPos + dropArgCount - 1, -slotCount); + long conv = makeDupConv(OP_DROP_ARGS, dropArgPos + dropArgCount - 1, -slotCount); return new AdapterMethodHandle(target, newType, conv); } @@ -739,7 +860,7 @@ class AdapterMethodHandle extends BoundMethodHandle { int keep1InSlot = newType.parameterSlotDepth(dupArgPos); int slotCount = keep1InSlot - dupSlot; assert(target.type().parameterSlotCount() - slotCount == newType.parameterSlotCount()); - long conv = makeConv(OP_DUP_ARGS, dupArgPos + dupArgCount - 1, slotCount); + long conv = makeDupConv(OP_DUP_ARGS, dupArgPos + dupArgCount - 1, slotCount); return new AdapterMethodHandle(target, newType, conv); } @@ -900,7 +1021,7 @@ class AdapterMethodHandle extends BoundMethodHandle { for (int i = 0; i < spreadArgCount; i++) { Class src = VerifyType.spreadArgElementType(spreadArgType, i); Class dst = targetType.parameterType(spreadArgPos + i); - if (src == null || !VerifyType.isNullConversion(src, dst)) + if (src == null || !canConvertArgument(src, dst, 1)) return false; } return true; @@ -910,24 +1031,100 @@ class AdapterMethodHandle extends BoundMethodHandle { /** Factory method: Spread selected argument. */ static MethodHandle makeSpreadArguments(MethodType newType, MethodHandle target, Class spreadArgType, int spreadArgPos, int spreadArgCount) { + // FIXME: Get rid of newType; derive new arguments from structure of spreadArgType MethodType targetType = target.type(); if (!canSpreadArguments(newType, targetType, spreadArgType, spreadArgPos, spreadArgCount)) return null; + // dest is not significant; remove? + int dest = T_VOID; + for (int i = 0; i < spreadArgCount; i++) { + Class arg = VerifyType.spreadArgElementType(spreadArgType, i); + if (arg == null) arg = Object.class; + int dest2 = basicType(arg); + if (dest == T_VOID) dest = dest2; + else if (dest != dest2) dest = T_VOID; + if (dest == T_VOID) break; + targetType = targetType.changeParameterType(spreadArgPos + i, arg); + } + target = target.asType(targetType); + int arrayArgSize = 1; // always a reference // in arglist: [0: ...keep1 | spos: spreadArg | spos+1: keep2... ] // out arglist: [0: ...keep1 | spos: spread... | spos+scount: keep2... 
] int keep2OutPos = spreadArgPos + spreadArgCount; - int spreadSlot = targetType.parameterSlotDepth(keep2OutPos); - int keep1OutSlot = targetType.parameterSlotDepth(spreadArgPos); - int slotCount = keep1OutSlot - spreadSlot; - assert(spreadSlot == newType.parameterSlotDepth(spreadArgPos+1)); + int keep1OutSlot = targetType.parameterSlotDepth(spreadArgPos); // leading edge of |spread...| + int spreadSlot = targetType.parameterSlotDepth(keep2OutPos); // trailing edge of |spread...| + assert(spreadSlot == newType.parameterSlotDepth(spreadArgPos+arrayArgSize)); + int slotCount = keep1OutSlot - spreadSlot; // slots in |spread...| assert(slotCount >= spreadArgCount); - long conv = makeConv(OP_SPREAD_ARGS, spreadArgPos, slotCount-1); + int stackMove = - arrayArgSize + slotCount; // pop array, push N slots + long conv = makeSpreadConv(OP_SPREAD_ARGS, spreadArgPos, T_OBJECT, dest, stackMove); MethodHandle res = new AdapterMethodHandle(target, newType, conv, spreadArgType); assert(res.type().parameterType(spreadArgPos) == spreadArgType); return res; } - // TO DO: makeCollectArguments, makeFlyby, makeRicochet + /** Can an adapter collect a series of arguments, replacing them by zero or one results? */ + static boolean canCollectArguments(MethodType targetType, + MethodType collectorType, int collectArgPos, boolean retainOriginalArgs) { + if (!convOpSupported(retainOriginalArgs ? OP_FOLD_ARGS : OP_COLLECT_ARGS)) return false; + int collectArgCount = collectorType.parameterCount(); + Class rtype = collectorType.returnType(); + assert(rtype == void.class || targetType.parameterType(collectArgPos) == rtype) + // [(Object)Object[], (Object[])Object[], 0, 1] + : Arrays.asList(targetType, collectorType, collectArgPos, collectArgCount) + ; + return true; + } + + /** Factory method: Collect or filter selected argument(s). */ + static MethodHandle makeCollectArguments(MethodHandle target, + MethodHandle collector, int collectArgPos, boolean retainOriginalArgs) { + assert(canCollectArguments(target.type(), collector.type(), collectArgPos, retainOriginalArgs)); + MethodType targetType = target.type(); + MethodType collectorType = collector.type(); + int collectArgCount = collectorType.parameterCount(); + Class collectValType = collectorType.returnType(); + int collectValCount = (collectValType == void.class ? 0 : 1); + int collectValSlots = collectorType.returnSlotCount(); + MethodType newType = targetType + .dropParameterTypes(collectArgPos, collectArgPos+collectValCount); + if (!retainOriginalArgs) { + newType = newType + .insertParameterTypes(collectArgPos, collectorType.parameterList()); + } else { + // parameter types at the fold point must be the same + assert(diffParamTypes(newType, collectArgPos, targetType, collectValCount, collectArgCount, false) == 0) + : Arrays.asList(target, collector, collectArgPos, retainOriginalArgs); + } + // in arglist: [0: ...keep1 | cpos: collect... | cpos+cacount: keep2... ] + // out arglist: [0: ...keep1 | cpos: collectVal? | cpos+cvcount: keep2... ] + // out(retain): [0: ...keep1 | cpos: cV? coll... | cpos+cvc+cac: keep2... 
] + int keep2InPos = collectArgPos + collectArgCount; + int keep1InSlot = newType.parameterSlotDepth(collectArgPos); // leading edge of |collect...| + int collectSlot = newType.parameterSlotDepth(keep2InPos); // trailing edge of |collect...| + int slotCount = keep1InSlot - collectSlot; // slots in |collect...| + assert(slotCount >= collectArgCount); + assert(collectSlot == targetType.parameterSlotDepth( + collectArgPos + collectValCount + (retainOriginalArgs ? collectArgCount : 0) )); + int dest = basicType(collectValType); + int src = T_VOID; + // src is not significant; remove? + for (int i = 0; i < collectArgCount; i++) { + int src2 = basicType(collectorType.parameterType(i)); + if (src == T_VOID) src = src2; + else if (src != src2) src = T_VOID; + if (src == T_VOID) break; + } + int stackMove = collectValSlots; // push 0..2 results + if (!retainOriginalArgs) stackMove -= slotCount; // pop N arguments + int lastCollectArg = keep2InPos-1; + long conv = makeSpreadConv(retainOriginalArgs ? OP_FOLD_ARGS : OP_COLLECT_ARGS, + lastCollectArg, src, dest, stackMove); + MethodHandle res = new AdapterMethodHandle(target, newType, conv, collector); + assert(res.type().parameterList().subList(collectArgPos, collectArgPos+collectArgCount) + .equals(collector.type().parameterList())); + return res; + } @Override public String toString() { diff --git a/jdk/src/share/classes/java/lang/invoke/CallSite.java b/jdk/src/share/classes/java/lang/invoke/CallSite.java index b2da146d261..0924a438c80 100644 --- a/jdk/src/share/classes/java/lang/invoke/CallSite.java +++ b/jdk/src/share/classes/java/lang/invoke/CallSite.java @@ -273,9 +273,9 @@ public class CallSite { Object binding; info = maybeReBox(info); if (info == null) { - binding = bootstrapMethod.invokeGeneric(caller, name, type); + binding = bootstrapMethod.invoke(caller, name, type); } else if (!info.getClass().isArray()) { - binding = bootstrapMethod.invokeGeneric(caller, name, type, info); + binding = bootstrapMethod.invoke(caller, name, type, info); } else { Object[] argv = (Object[]) info; maybeReBoxElements(argv); @@ -283,10 +283,10 @@ public class CallSite { throw new BootstrapMethodError("too many bootstrap method arguments"); MethodType bsmType = bootstrapMethod.type(); if (bsmType.parameterCount() == 4 && bsmType.parameterType(3) == Object[].class) - binding = bootstrapMethod.invokeGeneric(caller, name, type, argv); + binding = bootstrapMethod.invoke(caller, name, type, argv); else binding = MethodHandles.spreadInvoker(bsmType, 3) - .invokeGeneric(bootstrapMethod, caller, name, type, argv); + .invoke(bootstrapMethod, caller, name, type, argv); } //System.out.println("BSM for "+name+type+" => "+binding); if (binding instanceof CallSite) { diff --git a/jdk/src/share/classes/java/lang/invoke/FilterGeneric.java b/jdk/src/share/classes/java/lang/invoke/FilterGeneric.java index 6c3395002f5..769289710d9 100644 --- a/jdk/src/share/classes/java/lang/invoke/FilterGeneric.java +++ b/jdk/src/share/classes/java/lang/invoke/FilterGeneric.java @@ -61,6 +61,10 @@ class FilterGeneric { return ad; } + static { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + } + Adapter makeInstance(Kind kind, int pos, MethodHandle filter, MethodHandle target) { Adapter ad = getAdapter(kind, pos); return ad.makeInstance(ad.prototypeEntryPoint(), filter, target); diff --git a/jdk/src/share/classes/java/lang/invoke/FilterOneArgument.java b/jdk/src/share/classes/java/lang/invoke/FilterOneArgument.java index 64a9f072797..f52c8c06f0a 100644 
--- a/jdk/src/share/classes/java/lang/invoke/FilterOneArgument.java +++ b/jdk/src/share/classes/java/lang/invoke/FilterOneArgument.java @@ -67,6 +67,10 @@ class FilterOneArgument extends BoundMethodHandle { this.target = target; } + static { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + } + public static MethodHandle make(MethodHandle filter, MethodHandle target) { if (filter == null) return target; if (target == null) return filter; diff --git a/jdk/src/share/classes/java/lang/invoke/FromGeneric.java b/jdk/src/share/classes/java/lang/invoke/FromGeneric.java index 1c0523a4a36..1e035a46fb1 100644 --- a/jdk/src/share/classes/java/lang/invoke/FromGeneric.java +++ b/jdk/src/share/classes/java/lang/invoke/FromGeneric.java @@ -98,6 +98,10 @@ class FromGeneric { this.unboxingInvoker = computeUnboxingInvoker(targetType, internalType0); } + static { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + } + /** * The typed target will be called according to targetType. * The adapter code will in fact see the raw result from internalType, @@ -112,10 +116,10 @@ class FromGeneric { assert(iret == Object.class); return ValueConversions.identity(); } else if (wrap.primitiveType() == iret) { - return ValueConversions.box(wrap, false); + return ValueConversions.box(wrap); } else { assert(tret == double.class ? iret == long.class : iret == int.class); - return ValueConversions.boxRaw(wrap, false); + return ValueConversions.boxRaw(wrap); } } @@ -135,7 +139,7 @@ class FromGeneric { MethodType fixArgsType = internalType.changeReturnType(targetType.returnType()); MethodHandle fixArgs = MethodHandleImpl.convertArguments( invoker, Invokers.invokerType(fixArgsType), - invoker.type(), null); + invoker.type(), 0); if (fixArgs == null) throw new InternalError("bad fixArgs"); // reinterpret the calling sequence as raw: @@ -160,7 +164,6 @@ class FromGeneric { /** Build an adapter of the given generic type, which invokes typedTarget * on the incoming arguments, after unboxing as necessary. * The return value is boxed if necessary. - * @param genericType the required type of the result * @param typedTarget the target * @return an adapter method handle */ @@ -231,7 +234,7 @@ class FromGeneric { } static Adapter buildAdapterFromBytecodes(MethodType internalType) { - throw new UnsupportedOperationException("NYI"); + throw new UnsupportedOperationException("NYI "+internalType); } /** diff --git a/jdk/src/share/classes/java/lang/invoke/InvokeGeneric.java b/jdk/src/share/classes/java/lang/invoke/InvokeGeneric.java index a235e7318da..a747090b9a1 100644 --- a/jdk/src/share/classes/java/lang/invoke/InvokeGeneric.java +++ b/jdk/src/share/classes/java/lang/invoke/InvokeGeneric.java @@ -29,12 +29,12 @@ import sun.invoke.util.*; import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; /** - * Adapters which manage MethodHandle.invokeGeneric calls. + * Adapters which manage inexact MethodHandle.invoke calls. * The JVM calls one of these when the exact type match fails. * @author jrose */ class InvokeGeneric { - // erased type for the call, which originates from an invokeGeneric site + // erased type for the call, which originates from an inexact invoke site private final MethodType erasedCallerType; // an invoker of type (MT, MH; A...) -> R private final MethodHandle initialInvoker; @@ -56,7 +56,7 @@ class InvokeGeneric { } /** Return the adapter information for this type's erasure. 
*/ - /*non-public*/ static MethodHandle genericInvokerOf(MethodType erasedCallerType) throws ReflectiveOperationException { + /*non-public*/ static MethodHandle generalInvokerOf(MethodType erasedCallerType) throws ReflectiveOperationException { InvokeGeneric gen = new InvokeGeneric(erasedCallerType); return gen.initialInvoker; } diff --git a/jdk/src/share/classes/java/lang/invoke/Invokers.java b/jdk/src/share/classes/java/lang/invoke/Invokers.java index 4eeb36f76b7..7bf134cc858 100644 --- a/jdk/src/share/classes/java/lang/invoke/Invokers.java +++ b/jdk/src/share/classes/java/lang/invoke/Invokers.java @@ -43,10 +43,10 @@ class Invokers { private /*lazy*/ MethodHandle erasedInvoker; /*lazy*/ MethodHandle erasedInvokerWithDrops; // for InvokeGeneric - // generic (untyped) invoker for the outgoing call - private /*lazy*/ MethodHandle genericInvoker; + // general invoker for the outgoing call + private /*lazy*/ MethodHandle generalInvoker; - // generic (untyped) invoker for the outgoing call; accepts a single Object[] + // general invoker for the outgoing call; accepts a single Object[] private final /*lazy*/ MethodHandle[] spreadInvokers; // invoker for an unbound callsite @@ -77,13 +77,13 @@ class Invokers { return invoker; } - /*non-public*/ MethodHandle genericInvoker() { + /*non-public*/ MethodHandle generalInvoker() { MethodHandle invoker1 = exactInvoker(); - MethodHandle invoker = genericInvoker; + MethodHandle invoker = generalInvoker; if (invoker != null) return invoker; - MethodType genericType = targetType.generic(); - invoker = MethodHandles.convertArguments(invoker1, invokerType(genericType)); - genericInvoker = invoker; + MethodType generalType = targetType.generic(); + invoker = invoker1.asType(invokerType(generalType)); + generalInvoker = invoker; return invoker; } @@ -93,9 +93,9 @@ class Invokers { if (invoker != null) return invoker; MethodType erasedType = targetType.erase(); if (erasedType == targetType.generic()) - invoker = genericInvoker(); + invoker = generalInvoker(); else - invoker = MethodHandles.convertArguments(invoker1, invokerType(erasedType)); + invoker = invoker1.asType(invokerType(erasedType)); erasedInvoker = invoker; return invoker; } @@ -103,7 +103,7 @@ class Invokers { /*non-public*/ MethodHandle spreadInvoker(int objectArgCount) { MethodHandle vaInvoker = spreadInvokers[objectArgCount]; if (vaInvoker != null) return vaInvoker; - MethodHandle gInvoker = genericInvoker(); + MethodHandle gInvoker = generalInvoker(); vaInvoker = gInvoker.asSpreader(Object[].class, targetType.parameterCount() - objectArgCount); spreadInvokers[objectArgCount] = vaInvoker; return vaInvoker; diff --git a/jdk/src/share/classes/java/lang/invoke/MemberName.java b/jdk/src/share/classes/java/lang/invoke/MemberName.java index 0300fe758cc..c61336d2af1 100644 --- a/jdk/src/share/classes/java/lang/invoke/MemberName.java +++ b/jdk/src/share/classes/java/lang/invoke/MemberName.java @@ -525,7 +525,7 @@ import static java.lang.invoke.MethodHandleStatics.*; /** A factory type for resolving member names with the help of the VM. * TBD: Define access-safe public constructors for this factory. 
*/ - public static class Factory { + /*non-public*/ static class Factory { private Factory() { } // singleton pattern static Factory INSTANCE = new Factory(); diff --git a/jdk/src/share/classes/java/lang/invoke/MethodHandle.java b/jdk/src/share/classes/java/lang/invoke/MethodHandle.java index 8165915c7bb..e6424f1516f 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodHandle.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodHandle.java @@ -26,6 +26,7 @@ package java.lang.invoke; +import sun.invoke.util.ValueConversions; import static java.lang.invoke.MethodHandleStatics.*; /** @@ -53,12 +54,12 @@ import static java.lang.invoke.MethodHandleStatics.*; * and the kinds of transformations that apply to it. *

      * A method handle contains a pair of special invoker methods - * called {@link #invokeExact invokeExact} and {@link #invokeGeneric invokeGeneric}. + * called {@link #invokeExact invokeExact} and {@link #invoke invoke}. * Both invoker methods provide direct access to the method handle's * underlying method, constructor, field, or other operation, * as modified by transformations of arguments and return values. * Both invokers accept calls which exactly match the method handle's own type. - * The {@code invokeGeneric} invoker also accepts a range of other call types. + * The plain, inexact invoker also accepts a range of other call types. *

      * Method handles are immutable and have no visible state. * Of course, they can be bound to underlying methods or data which exhibit state. @@ -76,7 +77,7 @@ import static java.lang.invoke.MethodHandleStatics.*; * may change from time to time or across implementations from different vendors. * *

      Method handle compilation

      - * A Java method call expression naming {@code invokeExact} or {@code invokeGeneric} + * A Java method call expression naming {@code invokeExact} or {@code invoke} * can invoke a method handle from Java source code. * From the viewpoint of source code, these methods can take any arguments * and their result can be cast to any return type. @@ -86,7 +87,7 @@ import static java.lang.invoke.MethodHandleStatics.*; * which connects this freedom of invocation directly to the JVM execution stack. *
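The descriptor-recording rule above can be seen in a standalone sketch (illustrative only; DescriptorDemo and its contents are not part of this changeset): the compiler derives the invokevirtual type descriptor from the static argument types and from any cast applied to the result, not from the handle's runtime type.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class DescriptorDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle concat = MethodHandles.lookup().findVirtual(
                String.class, "concat", MethodType.methodType(String.class, String.class));
        // Static argument types plus the cast yield the recorded descriptor
        // (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
        String s = (String) concat.invokeExact("method ", "handles");
        // Without the cast and with Object-typed arguments the descriptor becomes
        // (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;, which would be
        // rejected by invokeExact but is acceptable to plain invoke.
        Object o = concat.invoke((Object) "method ", (Object) "handles");
        System.out.println(s + " / " + o);
    }
}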

      * As is usual with virtual methods, source-level calls to {@code invokeExact} - * and {@code invokeGeneric} compile to an {@code invokevirtual} instruction. + * and {@code invoke} compile to an {@code invokevirtual} instruction. * More unusually, the compiler must record the actual argument types, * and may not perform method invocation conversions on the arguments. * Instead, it must push them on the stack according to their own unconverted types. @@ -109,7 +110,7 @@ import static java.lang.invoke.MethodHandleStatics.*; * The first time a {@code invokevirtual} instruction is executed * it is linked, by symbolically resolving the names in the instruction * and verifying that the method call is statically legal. - * This is true of calls to {@code invokeExact} and {@code invokeGeneric}. + * This is true of calls to {@code invokeExact} and {@code invoke}. * In this case, the type descriptor emitted by the compiler is checked for * correct syntax and names it contains are resolved. * Thus, an {@code invokevirtual} instruction which invokes @@ -127,18 +128,18 @@ import static java.lang.invoke.MethodHandleStatics.*; * In the case of {@code invokeExact}, the type descriptor of the invocation * (after resolving symbolic type names) must exactly match the method type * of the receiving method handle. - * In the case of {@code invokeGeneric}, the resolved type descriptor + * In the case of plain, inexact {@code invoke}, the resolved type descriptor * must be a valid argument to the receiver's {@link #asType asType} method. - * Thus, {@code invokeGeneric} is more permissive than {@code invokeExact}. + * Thus, plain {@code invoke} is more permissive than {@code invokeExact}. *

      * After type matching, a call to {@code invokeExact} directly * and immediately invoke the method handle's underlying method * (or other behavior, as the case may be). *

      - * A call to {@code invokeGeneric} works the same as a call to + * A call to plain {@code invoke} works the same as a call to * {@code invokeExact}, if the type descriptor specified by the caller * exactly matches the method handle's own type. - * If there is a type mismatch, {@code invokeGeneric} attempts + * If there is a type mismatch, {@code invoke} attempts * to adjust the type of the receiving method handle, * as if by a call to {@link #asType asType}, * to obtain an exactly invokable method handle {@code M2}. @@ -152,7 +153,7 @@ import static java.lang.invoke.MethodHandleStatics.*; * In typical programs, method handle type matching will usually succeed. * But if a match fails, the JVM will throw a {@link WrongMethodTypeException}, * either directly (in the case of {@code invokeExact}) or indirectly as if - * by a failed call to {@code asType} (in the case of {@code invokeGeneric}). + * by a failed call to {@code asType} (in the case of {@code invoke}). *
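A minimal sketch of the equivalence just described, assuming the renamed plain invoke of this changeset is available (InvokeVsAsType is an illustrative name): a mismatched call through plain invoke behaves like adapting the handle with asType and then calling invokeExact.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class InvokeVsAsType {
    public static void main(String[] args) throws Throwable {
        MethodHandle max = MethodHandles.lookup().findStatic(
                Math.class, "max", MethodType.methodType(int.class, int.class, int.class));

        // Call-site descriptor (Object,Object)Object does not match (int,int)int,
        // so plain invoke adapts the handle before calling it.
        Object a = max.invoke((Object) 3, (Object) 4);             // 4

        // Equivalent, per the text above: adapt first, then invoke exactly.
        MethodHandle adapted = max.asType(MethodType.genericMethodType(2));
        Object b = adapted.invokeExact((Object) 3, (Object) 4);    // 4

        System.out.println(a + " " + b);
    }
}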

      * Thus, a method type mismatch which might show up as a linkage error * in a statically typed program can show up as @@ -249,8 +250,8 @@ assert(s.equals("savvy")); mt = MethodType.methodType(java.util.List.class, Object[].class); mh = lookup.findStatic(java.util.Arrays.class, "asList", mt); assert(mh.isVarargsCollector()); -x = mh.invokeGeneric("one", "two"); -// invokeGeneric(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object; +x = mh.invoke("one", "two"); +// invoke(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object; assert(x.equals(java.util.Arrays.asList("one","two"))); // mt is (Object,Object,Object)Object mt = MethodType.genericMethodType(3); @@ -269,12 +270,12 @@ mh = lookup.findVirtual(java.io.PrintStream.class, "println", mt); mh.invokeExact(System.out, "Hello, world."); // invokeExact(Ljava/io/PrintStream;Ljava/lang/String;)V * - * Each of the above calls to {@code invokeExact} or {@code invokeGeneric} + * Each of the above calls to {@code invokeExact} or plain {@code invoke} * generates a single invokevirtual instruction with * the type descriptor indicated in the following comment. * *

      Exceptions

      - * The methods {@code invokeExact} and {@code invokeGeneric} are declared + * The methods {@code invokeExact} and {@code invoke} are declared * to throw {@link java.lang.Throwable Throwable}, * which is to say that there is no static restriction on what a method handle * can throw. Since the JVM does not distinguish between checked @@ -288,7 +289,7 @@ mh.invokeExact(System.out, "Hello, world."); * *
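Because both invokers are declared to throw Throwable, Java callers must either declare Throwable or catch it locally; a small illustrative sketch (ThrowableDemo and its parse helper are hypothetical):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class ThrowableDemo {
    static int parse(String s) { return Integer.parseInt(s); }

    public static void main(String[] args) {
        try {
            MethodHandle mh = MethodHandles.lookup().findStatic(
                    ThrowableDemo.class, "parse", MethodType.methodType(int.class, String.class));
            int n = (int) mh.invokeExact("not a number");   // throws NumberFormatException
            System.out.println(n);
        } catch (Throwable t) {
            // Checked or unchecked, anything thrown by the target arrives here unchanged.
            System.out.println("caught: " + t);
        }
    }
}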

      Signature polymorphism

      * The unusual compilation and linkage behavior of - * {@code invokeExact} and {@code invokeGeneric} + * {@code invokeExact} and plain {@code invoke} * is referenced by the term signature polymorphism. * A signature polymorphic method is one which can operate with * any of a wide range of call signatures and return types. @@ -322,7 +323,7 @@ mh.invokeExact(System.out, "Hello, world."); * The following methods (and no others) are signature polymorphic: *
        *
      • {@link java.lang.invoke.MethodHandle#invokeExact MethodHandle.invokeExact} - *
      • {@link java.lang.invoke.MethodHandle#invokeGeneric MethodHandle.invokeGeneric} + *
      • {@link java.lang.invoke.MethodHandle#invoke MethodHandle.invoke} *
      *

      * A signature polymorphic method will be declared with the following properties: @@ -374,24 +375,34 @@ mh.invokeExact(System.out, "Hello, world."); *

      * As a special case, * when the Core Reflection API is used to view the signature polymorphic - * methods {@code invokeExact} or {@code invokeGeneric} in this class, - * they appear as single, non-polymorphic native methods. - * Calls to these native methods do not result in method handle invocations. + * methods {@code invokeExact} or plain {@code invoke} in this class, + * they appear as ordinary non-polymorphic methods. + * Their reflective appearance, as viewed by + * {@link java.lang.Class#getDeclaredMethod Class.getDeclaredMethod}, + * is unaffected by their special status in this API. + * For example, {@link java.lang.reflect.Method#getModifiers Method.getModifiers} + * will report exactly those modifier bits required for any similarly + * declared method, including in this case {@code native} and {@code varargs} bits. + *

      + * As with any reflected method, these methods (when reflected) may be + * invoked via {@link java.lang.reflect.Method#invoke Method.invoke}. + * However, such reflective calls do not result in method handle invocations. + * Such a call, if passed the required argument + * (a single one, of type {@code Object[]}), will ignore the argument and + * will throw an {@code UnsupportedOperationException}. + *
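An illustrative sketch of the reflective view just described, assuming the newly added invoke method is present (ReflectiveViewDemo is hypothetical; how the thrown exception is surfaced by reflection may vary by implementation):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

class ReflectiveViewDemo {
    public static void main(String[] args) throws Exception {
        Method invoke = MethodHandle.class.getDeclaredMethod("invoke", Object[].class);
        System.out.println(Modifier.isNative(invoke.getModifiers()));  // native placeholder
        System.out.println(invoke.isVarArgs());                        // varargs bit is set

        MethodHandle id = MethodHandles.identity(String.class);
        try {
            // One argument of type Object[], as the text above requires.
            invoke.invoke(id, new Object[]{ new Object[]{ "hello" } });
        } catch (Exception expected) {
            // Per the text above, the placeholder ignores the argument and an
            // UnsupportedOperationException surfaces (possibly wrapped by reflection).
            System.out.println(expected);
        }
    }
}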

      * Since {@code invokevirtual} instructions can natively * invoke method handles under any type descriptor, this reflective view conflicts - * with the normal presentation via bytecodes. - * Thus, these two native methods, as viewed by - * {@link java.lang.Class#getDeclaredMethod Class.getDeclaredMethod}, - * are placeholders only. - * If invoked via {@link java.lang.reflect.Method#invoke Method.invoke}, - * they will throw {@code UnsupportedOperationException}. + * with the normal presentation of these methods via bytecodes. + * Thus, these two native methods, when reflectively viewed by + * {@code Class.getDeclaredMethod}, may be regarded as placeholders only. *

      * In order to obtain an invoker method for a particular type descriptor, * use {@link java.lang.invoke.MethodHandles#exactInvoker MethodHandles.exactInvoker}, - * or {@link java.lang.invoke.MethodHandles#genericInvoker MethodHandles.genericInvoker}. + * or {@link java.lang.invoke.MethodHandles#invoker MethodHandles.invoker}. * The {@link java.lang.invoke.MethodHandles.Lookup#findVirtual Lookup.findVirtual} * API is also able to return a method handle - * to call {@code invokeExact} or {@code invokeGeneric}, + * to call {@code invokeExact} or plain {@code invoke}, * for any specified type descriptor . * *
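A sketch of the recommended alternative, assuming the renamed MethodHandles.invoker factory referenced above (InvokerDemo is an illustrative name):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class InvokerDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle target = MethodHandles.lookup().findVirtual(
                String.class, "concat",
                MethodType.methodType(String.class, String.class));  // (String,String)String

        // Invoker handles take the target as an extra leading argument.
        MethodHandle exact = MethodHandles.exactInvoker(target.type());
        String s1 = (String) exact.invokeExact(target, "foo", "bar");

        // The general invoker routes through asType, like plain invoke.
        MethodHandle general = MethodHandles.invoker(target.type());
        String s2 = (String) general.invoke(target, (Object) "foo", (Object) "bar");

        System.out.println(s1 + " " + s2);
    }
}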

      Interoperation between method handles and Java generics

      @@ -523,7 +534,7 @@ public abstract class MethodHandle { * adaptations directly on the caller's arguments, * and call the target method handle according to its own exact type. *

      - * The type descriptor at the call site of {@code invokeGeneric} must + * The type descriptor at the call site of {@code invoke} must * be a valid argument to the receivers {@code asType} method. * In particular, the caller must specify the same argument arity * as the callee's type, @@ -539,11 +550,18 @@ public abstract class MethodHandle { * @throws ClassCastException if the target's type can be adjusted to the caller, but a reference cast fails * @throws Throwable anything thrown by the underlying method propagates unchanged through the method handle call */ + public final native @PolymorphicSignature Object invoke(Object... args) throws Throwable; + + /** + * Temporary alias for {@link #invoke}, for backward compatibility with some versions of JSR 292. + * On some JVMs, support can be excluded by the flags {@code -XX:+UnlockExperimentalVMOptions -XX:-AllowInvokeGeneric}. + * @deprecated Will be removed for JSR 292 Proposed Final Draft. + */ public final native @PolymorphicSignature Object invokeGeneric(Object... args) throws Throwable; /** * Performs a varargs invocation, passing the arguments in the given array - * to the method handle, as if via {@link #invokeGeneric invokeGeneric} from a call site + * to the method handle, as if via an inexact {@link #invoke invoke} from a call site * which mentions only the type {@code Object}, and whose arity is the length * of the argument array. *

      @@ -553,7 +571,7 @@ public abstract class MethodHandle { *

        *
      • Determine the length of the argument array as {@code N}. * For a null reference, {@code N=0}.
      • - *
      • Determine the generic type {@code TN} of {@code N} arguments as + *
      • Determine the general type {@code TN} of {@code N} arguments as * as {@code TN=MethodType.genericMethodType(N)}.
      • *
      • Force the original target method handle {@code MH0} to the * required type, as {@code MH1 = MH0.asType(TN)}.
      • @@ -580,7 +598,7 @@ public abstract class MethodHandle { * Object result = invoker.invokeExact(this, arguments); * *

        - * Unlike the signature polymorphic methods {@code invokeExact} and {@code invokeGeneric}, + * Unlike the signature polymorphic methods {@code invokeExact} and {@code invoke}, * {@code invokeWithArguments} can be accessed normally via the Core Reflection API and JNI. * It can therefore be used as a bridge between native or reflective code and method handles. * @@ -595,11 +613,11 @@ public abstract class MethodHandle { int argc = arguments == null ? 0 : arguments.length; MethodType type = type(); if (type.parameterCount() != argc) { - // simulate invokeGeneric + // simulate invoke return asType(MethodType.genericMethodType(argc)).invokeWithArguments(arguments); } if (argc <= 10) { - MethodHandle invoker = type.invokers().genericInvoker(); + MethodHandle invoker = type.invokers().generalInvoker(); switch (argc) { case 0: return invoker.invokeExact(this); case 1: return invoker.invokeExact(this, @@ -644,7 +662,7 @@ public abstract class MethodHandle { /** * Performs a varargs invocation, passing the arguments in the given array - * to the method handle, as if via {@link #invokeGeneric invokeGeneric} from a call site + * to the method handle, as if via an inexact {@link #invoke invoke} from a call site * which mentions only the type {@code Object}, and whose arity is the length * of the argument array. *
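An illustrative sketch of invokeWithArguments as that bridge (InvokeWithArgumentsDemo is a hypothetical name): any arity, Object-typed arguments, and no signature-polymorphic call site required.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class InvokeWithArgumentsDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle substring = MethodHandles.lookup().findVirtual(
                String.class, "substring",
                MethodType.methodType(String.class, int.class, int.class));

        // Arguments travel as an Object[]; boxing and unboxing happen as if by
        // an inexact invoke at an all-Object call site.
        Object r1 = substring.invokeWithArguments("method handles", 0, 6);   // "method"

        // The same ordinary (non-polymorphic) entry point works for arguments
        // that were packed into an array by reflective or native code.
        Object[] packed = { "method handles", 7, 14 };
        Object r2 = substring.invokeWithArguments(packed);                   // "handles"

        System.out.println(r1 + " " + r2);
    }
}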

        @@ -672,9 +690,9 @@ public abstract class MethodHandle { * If the original type and new type are equal, returns {@code this}. *

        * This method provides the crucial behavioral difference between - * {@link #invokeExact invokeExact} and {@link #invokeGeneric invokeGeneric}. The two methods + * {@link #invokeExact invokeExact} and plain, inexact {@link #invoke invoke}. The two methods * perform the same steps when the caller's type descriptor is identical - * with the callee's, but when the types differ, {@link #invokeGeneric invokeGeneric} + * with the callee's, but when the types differ, plain {@link #invoke invoke} * also calls {@code asType} (or some internal equivalent) in order * to match up the caller's and callee's types. *
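The hunk above also adds an explicit convertibility check to asType; a small sketch of a conversion it accepts and one it rejects (AsTypeDemo is illustrative):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.WrongMethodTypeException;

class AsTypeDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle length = MethodHandles.lookup().findVirtual(
                String.class, "length", MethodType.methodType(int.class));  // (String)int

        // Boxing the return value and widening the receiver reference are allowed.
        MethodHandle loose = length.asType(
                MethodType.methodType(Integer.class, Object.class));
        Integer n = (Integer) loose.invokeExact((Object) "hello");
        System.out.println(n);   // 5

        // Changing the arity is not a legal asType conversion, so the new check rejects it.
        try {
            length.asType(MethodType.methodType(int.class, String.class, String.class));
        } catch (WrongMethodTypeException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}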

        @@ -689,6 +707,9 @@ public abstract class MethodHandle { * @see MethodHandles#convertArguments */ public MethodHandle asType(MethodType newType) { + if (!type.isConvertibleTo(newType)) { + throw new WrongMethodTypeException("cannot convert "+type+" to "+newType); + } return MethodHandles.convertArguments(this, newType); } @@ -731,13 +752,9 @@ public abstract class MethodHandle { public MethodHandle asSpreader(Class arrayType, int arrayLength) { Class arrayElement = arrayType.getComponentType(); if (arrayElement == null) throw newIllegalArgumentException("not an array type"); - MethodType oldType = type(); - int nargs = oldType.parameterCount(); + int nargs = type().parameterCount(); if (nargs < arrayLength) throw newIllegalArgumentException("bad spread array length"); - int keepPosArgs = nargs - arrayLength; - MethodType newType = oldType.dropParameterTypes(keepPosArgs, nargs); - newType = newType.insertParameterTypes(keepPosArgs, arrayType); - return MethodHandles.spreadArguments(this, newType); + return MethodHandleImpl.spreadArguments(this, arrayType, arrayLength); } /** @@ -780,15 +797,18 @@ public abstract class MethodHandle { * @see #asVarargsCollector */ public MethodHandle asCollector(Class arrayType, int arrayLength) { + asCollectorChecks(arrayType, arrayLength); + MethodHandle collector = ValueConversions.varargsArray(arrayType, arrayLength); + return MethodHandleImpl.collectArguments(this, type.parameterCount()-1, collector); + } + + private void asCollectorChecks(Class arrayType, int arrayLength) { Class arrayElement = arrayType.getComponentType(); - if (arrayElement == null) throw newIllegalArgumentException("not an array type"); - MethodType oldType = type(); - int nargs = oldType.parameterCount(); - if (nargs == 0) throw newIllegalArgumentException("no trailing argument"); - MethodType newType = oldType.dropParameterTypes(nargs-1, nargs); - newType = newType.insertParameterTypes(nargs-1, - java.util.Collections.>nCopies(arrayLength, arrayElement)); - return MethodHandles.collectArguments(this, newType); + if (arrayElement == null) + throw newIllegalArgumentException("not an array type", arrayType); + int nargs = type().parameterCount(); + if (nargs == 0 || !type().parameterType(nargs-1).isAssignableFrom(arrayType)) + throw newIllegalArgumentException("array type not assignable to trailing argument", this, arrayType); } /** @@ -798,7 +818,7 @@ public abstract class MethodHandle { *

        * The type and behavior of the adapter will be the same as * the type and behavior of the target, except that certain - * {@code invokeGeneric} and {@code asType} requests can lead to + * {@code invoke} and {@code asType} requests can lead to * trailing positional arguments being collected into target's * trailing parameter. * Also, the last parameter type of the adapter will be @@ -812,17 +832,17 @@ public abstract class MethodHandle { * since it accepts a whole array of indeterminate length, * rather than a fixed number of arguments.) *

        - * When called with {@link #invokeGeneric invokeGeneric}, if the caller + * When called with plain, inexact {@link #invoke invoke}, if the caller * type is the same as the adapter, the adapter invokes the target as with * {@code invokeExact}. - * (This is the normal behavior for {@code invokeGeneric} when types match.) + * (This is the normal behavior for {@code invoke} when types match.) *

        * Otherwise, if the caller and adapter arity are the same, and the * trailing parameter type of the caller is a reference type identical to * or assignable to the trailing parameter type of the adapter, * the arguments and return values are converted pairwise, * as if by {@link MethodHandles#convertArguments convertArguments}. - * (This is also normal behavior for {@code invokeGeneric} in such a case.) + * (This is also normal behavior for {@code invoke} in such a case.) *

        * Otherwise, the arities differ, or the adapter's trailing parameter * type is not assignable from the corresponding caller type. @@ -838,7 +858,7 @@ public abstract class MethodHandle { * where {@code N} is the arity of the target. * Also, there must exist conversions from the incoming arguments * to the target's arguments. - * As with other uses of {@code invokeGeneric}, if these basic + * As with other uses of plain {@code invoke}, if these basic * requirements are not fulfilled, a {@code WrongMethodTypeException} * may be thrown. *
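A sketch of the collection behavior described above, using Arrays.deepToString as the target (VarargsCollectorDemo is an illustrative name):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class VarargsCollectorDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle deepToString = MethodHandles.publicLookup().findStatic(
                java.util.Arrays.class, "deepToString",
                MethodType.methodType(String.class, Object[].class));
        MethodHandle ts = deepToString.asVarargsCollector(Object[].class);

        // Arity N-1 (zero here), N, or more: trailing arguments are collected.
        System.out.println(ts.invoke());                  // []
        System.out.println(ts.invoke("one"));             // [one]
        System.out.println(ts.invoke("one", 2, 3.0));     // [one, 2, 3.0]

        // An exact call at the original type passes the array through whole.
        String s = (String) ts.invokeExact(new Object[]{ "a", "b" });
        System.out.println(s);                            // [a, b]
    }
}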

        @@ -856,7 +876,7 @@ public abstract class MethodHandle { *

        * The behavior of {@link #asType asType} is also specialized for * variable arity adapters, to maintain the invariant that - * {@code invokeGeneric} is always equivalent to an {@code asType} + * plain, inexact {@code invoke} is always equivalent to an {@code asType} * call to adjust the target type, followed by {@code invokeExact}. * Therefore, a variable arity adapter responds * to an {@code asType} request by building a fixed arity collector, @@ -893,12 +913,12 @@ public abstract class MethodHandle { MethodHandle asList = publicLookup() .findStatic(Arrays.class, "asList", methodType(List.class, Object[].class)) .asVarargsCollector(Object[].class); -assertEquals("[]", asList.invokeGeneric().toString()); -assertEquals("[1]", asList.invokeGeneric(1).toString()); -assertEquals("[two, too]", asList.invokeGeneric("two", "too").toString()); +assertEquals("[]", asList.invoke().toString()); +assertEquals("[1]", asList.invoke(1).toString()); +assertEquals("[two, too]", asList.invoke("two", "too").toString()); Object[] argv = { "three", "thee", "tee" }; -assertEquals("[three, thee, tee]", asList.invokeGeneric(argv).toString()); -List ls = (List) asList.invokeGeneric((Object)argv); +assertEquals("[three, thee, tee]", asList.invoke(argv).toString()); +List ls = (List) asList.invoke((Object)argv); assertEquals(1, ls.size()); assertEquals("[three, thee, tee]", Arrays.toString((Object[])ls.get(0))); * @@ -926,9 +946,9 @@ MethodHandle vamh = publicLookup() .asVarargsCollector(Object[].class); MethodHandle mh = MethodHandles.exactInvoker(vamh.type()).bindTo(vamh); assert(vamh.type().equals(mh.type())); -assertEquals("[1, 2, 3]", vamh.invokeGeneric(1,2,3).toString()); +assertEquals("[1, 2, 3]", vamh.invoke(1,2,3).toString()); boolean failed = false; -try { mh.invokeGeneric(1,2,3); } +try { mh.invoke(1,2,3); } catch (WrongMethodTypeException ex) { failed = true; } assert(failed); * @@ -960,7 +980,7 @@ assert(failed); *

      • an {@code ldc} instruction of a {@code CONSTANT_MethodHandle} * which resolves to a variable arity Java method or constructor *
      - * @return true if this method handle accepts more than one arity of {@code invokeGeneric} calls + * @return true if this method handle accepts more than one arity of plain, inexact {@code invoke} calls * @see #asVarargsCollector */ public boolean isVarargsCollector() { diff --git a/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java b/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java index e82442092fe..b17251e4799 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java @@ -121,11 +121,11 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; if (nargs < INVOKES.length) { MethodHandle invoke = INVOKES[nargs]; MethodType conType = CON_TYPES[nargs]; - MethodHandle gcon = convertArguments(rawConstructor, conType, rawConType, null); + MethodHandle gcon = convertArguments(rawConstructor, conType, rawConType, 0); if (gcon == null) return null; MethodHandle galloc = new AllocateObject(invoke, allocateClass, gcon); assert(galloc.type() == newType.generic()); - return convertArguments(galloc, newType, galloc.type(), null); + return convertArguments(galloc, newType, galloc.type(), 0); } else { MethodHandle invoke = VARARGS_INVOKE; MethodType conType = CON_TYPES[nargs]; @@ -256,8 +256,8 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; FieldAccessor.ahandle(arrayClass, true) }; if (mhs[0].type().parameterType(0) == Class.class) { - mhs[0] = MethodHandles.insertArguments(mhs[0], 0, elemClass); - mhs[1] = MethodHandles.insertArguments(mhs[1], 0, elemClass); + mhs[0] = mhs[0].bindTo(elemClass); + mhs[1] = mhs[1].bindTo(elemClass); } synchronized (FieldAccessor.ARRAY_CACHE) {} // memory barrier FieldAccessor.ARRAY_CACHE.put(elemClass, mhs); @@ -372,7 +372,7 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; if (evclass != vclass || (!isStatic && ecclass != cclass)) { MethodType strongType = FieldAccessor.ftype(cclass, vclass, isSetter, isStatic); strongType = strongType.insertParameterTypes(0, FieldAccessor.class); - mh = MethodHandles.convertArguments(mh, strongType); + mh = convertArguments(mh, strongType, 0); } return mh; } @@ -439,8 +439,8 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; } if (caclass != null) { MethodType strongType = FieldAccessor.atype(caclass, isSetter); - mh = MethodHandles.insertArguments(mh, 0, caclass); - mh = MethodHandles.convertArguments(mh, strongType); + mh = mh.bindTo(caclass); + mh = convertArguments(mh, strongType, 0); } return mh; } @@ -465,7 +465,7 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; dmh.type().parameterType(0).isAssignableFrom(receiver.getClass())) { MethodHandle bmh = new BoundMethodHandle(dmh, receiver, 0); MethodType newType = target.type().dropParameterTypes(0, 1); - return convertArguments(bmh, newType, bmh.type(), null); + return convertArguments(bmh, newType, bmh.type(), 0); } } } @@ -486,301 +486,378 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; return new BoundMethodHandle(target, receiver, argnum); } - static MethodHandle convertArguments(MethodHandle target, + static MethodHandle permuteArguments(MethodHandle target, MethodType newType, MethodType oldType, int[] permutationOrNull) { assert(oldType.parameterCount() == target.type().parameterCount()); - if (permutationOrNull != null) { - int outargs = oldType.parameterCount(), inargs = newType.parameterCount(); - if (permutationOrNull.length != outargs) - throw 
newIllegalArgumentException("wrong number of arguments in permutation"); - // Make the individual outgoing argument types match up first. - Class[] callTypeArgs = new Class[outargs]; - for (int i = 0; i < outargs; i++) - callTypeArgs[i] = newType.parameterType(permutationOrNull[i]); - MethodType callType = MethodType.methodType(oldType.returnType(), callTypeArgs); - target = convertArguments(target, callType, oldType, null); - assert(target != null); - oldType = target.type(); - List goal = new ArrayList(); // i*TOKEN - List state = new ArrayList(); // i*TOKEN - List drops = new ArrayList(); // not tokens - List dups = new ArrayList(); // not tokens - final int TOKEN = 10; // to mark items which are symbolic only - // state represents the argument values coming into target - for (int i = 0; i < outargs; i++) { - state.add(permutationOrNull[i] * TOKEN); - } - // goal represents the desired state - for (int i = 0; i < inargs; i++) { - if (state.contains(i * TOKEN)) { - goal.add(i * TOKEN); - } else { - // adapter must initially drop all unused arguments - drops.add(i); - } - } - // detect duplications - while (state.size() > goal.size()) { - for (int i2 = 0; i2 < state.size(); i2++) { - int arg1 = state.get(i2); - int i1 = state.indexOf(arg1); - if (i1 != i2) { - // found duplicate occurrence at i2 - int arg2 = (inargs++) * TOKEN; - state.set(i2, arg2); - dups.add(goal.indexOf(arg1)); - goal.add(arg2); - } - } - } - assert(state.size() == goal.size()); - int size = goal.size(); - while (!state.equals(goal)) { - // Look for a maximal sequence of adjacent misplaced arguments, - // and try to rotate them into place. - int bestRotArg = -10 * TOKEN, bestRotLen = 0; - int thisRotArg = -10 * TOKEN, thisRotLen = 0; - for (int i = 0; i < size; i++) { - int arg = state.get(i); - // Does this argument match the current run? - if (arg == thisRotArg + TOKEN) { - thisRotArg = arg; - thisRotLen += 1; - if (bestRotLen < thisRotLen) { - bestRotLen = thisRotLen; - bestRotArg = thisRotArg; - } - } else { - // The old sequence (if any) stops here. - thisRotLen = 0; - thisRotArg = -10 * TOKEN; - // But maybe a new one starts here also. - int wantArg = goal.get(i); - final int MAX_ARG_ROTATION = AdapterMethodHandle.MAX_ARG_ROTATION; - if (arg != wantArg && - arg >= wantArg - TOKEN * MAX_ARG_ROTATION && - arg <= wantArg + TOKEN * MAX_ARG_ROTATION) { - thisRotArg = arg; - thisRotLen = 1; - } - } - } - if (bestRotLen >= 2) { - // Do a rotation if it can improve argument positioning - // by at least 2 arguments. This is not always optimal, - // but it seems to catch common cases. - int dstEnd = state.indexOf(bestRotArg); - int srcEnd = goal.indexOf(bestRotArg); - int rotBy = dstEnd - srcEnd; - int dstBeg = dstEnd - (bestRotLen - 1); - int srcBeg = srcEnd - (bestRotLen - 1); - assert((dstEnd | dstBeg | srcEnd | srcBeg) >= 0); // no negs - // Make a span which covers both source and destination. - int rotBeg = Math.min(dstBeg, srcBeg); - int rotEnd = Math.max(dstEnd, srcEnd); - int score = 0; - for (int i = rotBeg; i <= rotEnd; i++) { - if ((int)state.get(i) != (int)goal.get(i)) - score += 1; - } - List rotSpan = state.subList(rotBeg, rotEnd+1); - Collections.rotate(rotSpan, -rotBy); // reverse direction - for (int i = rotBeg; i <= rotEnd; i++) { - if ((int)state.get(i) != (int)goal.get(i)) - score -= 1; - } - if (score >= 2) { - // Improved at least two argument positions. Do it. 
- List> ptypes = Arrays.asList(oldType.parameterArray()); - Collections.rotate(ptypes.subList(rotBeg, rotEnd+1), -rotBy); - MethodType rotType = MethodType.methodType(oldType.returnType(), ptypes); - MethodHandle nextTarget - = AdapterMethodHandle.makeRotateArguments(rotType, target, - rotBeg, rotSpan.size(), rotBy); - if (nextTarget != null) { - //System.out.println("Rot: "+rotSpan+" by "+rotBy); - target = nextTarget; - oldType = rotType; - continue; - } - } - // Else de-rotate, and drop through to the swap-fest. - Collections.rotate(rotSpan, rotBy); - } - - // Now swap like the wind! - List> ptypes = Arrays.asList(oldType.parameterArray()); - for (int i = 0; i < size; i++) { - // What argument do I want here? - int arg = goal.get(i); - if (arg != state.get(i)) { - // Where is it now? - int j = state.indexOf(arg); - Collections.swap(ptypes, i, j); - MethodType swapType = MethodType.methodType(oldType.returnType(), ptypes); - target = AdapterMethodHandle.makeSwapArguments(swapType, target, i, j); - if (target == null) throw newIllegalArgumentException("cannot swap"); - assert(target.type() == swapType); - oldType = swapType; - Collections.swap(state, i, j); - } - } - // One pass of swapping must finish the job. - assert(state.equals(goal)); - } - while (!dups.isEmpty()) { - // Grab a contiguous trailing sequence of dups. - int grab = dups.size() - 1; - int dupArgPos = dups.get(grab), dupArgCount = 1; - while (grab - 1 >= 0) { - int dup0 = dups.get(grab - 1); - if (dup0 != dupArgPos - 1) break; - dupArgPos -= 1; - dupArgCount += 1; - grab -= 1; - } - //if (dupArgCount > 1) System.out.println("Dup: "+dups.subList(grab, dups.size())); - dups.subList(grab, dups.size()).clear(); - // In the new target type drop that many args from the tail: - List> ptypes = oldType.parameterList(); - ptypes = ptypes.subList(0, ptypes.size() - dupArgCount); - MethodType dupType = MethodType.methodType(oldType.returnType(), ptypes); - target = AdapterMethodHandle.makeDupArguments(dupType, target, dupArgPos, dupArgCount); - if (target == null) - throw newIllegalArgumentException("cannot dup"); - oldType = target.type(); - } - while (!drops.isEmpty()) { - // Grab a contiguous initial sequence of drops. - int dropArgPos = drops.get(0), dropArgCount = 1; - while (dropArgCount < drops.size()) { - int drop1 = drops.get(dropArgCount); - if (drop1 != dropArgPos + dropArgCount) break; - dropArgCount += 1; - } - //if (dropArgCount > 1) System.out.println("Drop: "+drops.subList(0, dropArgCount)); - drops.subList(0, dropArgCount).clear(); - List> dropTypes = newType.parameterList() - .subList(dropArgPos, dropArgPos + dropArgCount); - MethodType dropType = oldType.insertParameterTypes(dropArgPos, dropTypes); - target = AdapterMethodHandle.makeDropArguments(dropType, target, dropArgPos, dropArgCount); - if (target == null) throw newIllegalArgumentException("cannot drop"); - oldType = target.type(); + int outargs = oldType.parameterCount(), inargs = newType.parameterCount(); + if (permutationOrNull.length != outargs) + throw newIllegalArgumentException("wrong number of arguments in permutation"); + // Make the individual outgoing argument types match up first. 
+ Class[] callTypeArgs = new Class[outargs]; + for (int i = 0; i < outargs; i++) + callTypeArgs[i] = newType.parameterType(permutationOrNull[i]); + MethodType callType = MethodType.methodType(oldType.returnType(), callTypeArgs); + target = convertArguments(target, callType, oldType, 0); + assert(target != null); + oldType = target.type(); + List goal = new ArrayList(); // i*TOKEN + List state = new ArrayList(); // i*TOKEN + List drops = new ArrayList(); // not tokens + List dups = new ArrayList(); // not tokens + final int TOKEN = 10; // to mark items which are symbolic only + // state represents the argument values coming into target + for (int i = 0; i < outargs; i++) { + state.add(permutationOrNull[i] * TOKEN); + } + // goal represents the desired state + for (int i = 0; i < inargs; i++) { + if (state.contains(i * TOKEN)) { + goal.add(i * TOKEN); + } else { + // adapter must initially drop all unused arguments + drops.add(i); } } + // detect duplications + while (state.size() > goal.size()) { + for (int i2 = 0; i2 < state.size(); i2++) { + int arg1 = state.get(i2); + int i1 = state.indexOf(arg1); + if (i1 != i2) { + // found duplicate occurrence at i2 + int arg2 = (inargs++) * TOKEN; + state.set(i2, arg2); + dups.add(goal.indexOf(arg1)); + goal.add(arg2); + } + } + } + assert(state.size() == goal.size()); + int size = goal.size(); + while (!state.equals(goal)) { + // Look for a maximal sequence of adjacent misplaced arguments, + // and try to rotate them into place. + int bestRotArg = -10 * TOKEN, bestRotLen = 0; + int thisRotArg = -10 * TOKEN, thisRotLen = 0; + for (int i = 0; i < size; i++) { + int arg = state.get(i); + // Does this argument match the current run? + if (arg == thisRotArg + TOKEN) { + thisRotArg = arg; + thisRotLen += 1; + if (bestRotLen < thisRotLen) { + bestRotLen = thisRotLen; + bestRotArg = thisRotArg; + } + } else { + // The old sequence (if any) stops here. + thisRotLen = 0; + thisRotArg = -10 * TOKEN; + // But maybe a new one starts here also. + int wantArg = goal.get(i); + final int MAX_ARG_ROTATION = AdapterMethodHandle.MAX_ARG_ROTATION; + if (arg != wantArg && + arg >= wantArg - TOKEN * MAX_ARG_ROTATION && + arg <= wantArg + TOKEN * MAX_ARG_ROTATION) { + thisRotArg = arg; + thisRotLen = 1; + } + } + } + if (bestRotLen >= 2) { + // Do a rotation if it can improve argument positioning + // by at least 2 arguments. This is not always optimal, + // but it seems to catch common cases. + int dstEnd = state.indexOf(bestRotArg); + int srcEnd = goal.indexOf(bestRotArg); + int rotBy = dstEnd - srcEnd; + int dstBeg = dstEnd - (bestRotLen - 1); + int srcBeg = srcEnd - (bestRotLen - 1); + assert((dstEnd | dstBeg | srcEnd | srcBeg) >= 0); // no negs + // Make a span which covers both source and destination. + int rotBeg = Math.min(dstBeg, srcBeg); + int rotEnd = Math.max(dstEnd, srcEnd); + int score = 0; + for (int i = rotBeg; i <= rotEnd; i++) { + if ((int)state.get(i) != (int)goal.get(i)) + score += 1; + } + List rotSpan = state.subList(rotBeg, rotEnd+1); + Collections.rotate(rotSpan, -rotBy); // reverse direction + for (int i = rotBeg; i <= rotEnd; i++) { + if ((int)state.get(i) != (int)goal.get(i)) + score -= 1; + } + if (score >= 2) { + // Improved at least two argument positions. Do it. 
+ List> ptypes = Arrays.asList(oldType.parameterArray()); + Collections.rotate(ptypes.subList(rotBeg, rotEnd+1), -rotBy); + MethodType rotType = MethodType.methodType(oldType.returnType(), ptypes); + MethodHandle nextTarget + = AdapterMethodHandle.makeRotateArguments(rotType, target, + rotBeg, rotSpan.size(), rotBy); + if (nextTarget != null) { + //System.out.println("Rot: "+rotSpan+" by "+rotBy); + target = nextTarget; + oldType = rotType; + continue; + } + } + // Else de-rotate, and drop through to the swap-fest. + Collections.rotate(rotSpan, rotBy); + } + + // Now swap like the wind! + List> ptypes = Arrays.asList(oldType.parameterArray()); + for (int i = 0; i < size; i++) { + // What argument do I want here? + int arg = goal.get(i); + if (arg != state.get(i)) { + // Where is it now? + int j = state.indexOf(arg); + Collections.swap(ptypes, i, j); + MethodType swapType = MethodType.methodType(oldType.returnType(), ptypes); + target = AdapterMethodHandle.makeSwapArguments(swapType, target, i, j); + if (target == null) throw newIllegalArgumentException("cannot swap"); + assert(target.type() == swapType); + oldType = swapType; + Collections.swap(state, i, j); + } + } + // One pass of swapping must finish the job. + assert(state.equals(goal)); + } + while (!dups.isEmpty()) { + // Grab a contiguous trailing sequence of dups. + int grab = dups.size() - 1; + int dupArgPos = dups.get(grab), dupArgCount = 1; + while (grab - 1 >= 0) { + int dup0 = dups.get(grab - 1); + if (dup0 != dupArgPos - 1) break; + dupArgPos -= 1; + dupArgCount += 1; + grab -= 1; + } + //if (dupArgCount > 1) System.out.println("Dup: "+dups.subList(grab, dups.size())); + dups.subList(grab, dups.size()).clear(); + // In the new target type drop that many args from the tail: + List> ptypes = oldType.parameterList(); + ptypes = ptypes.subList(0, ptypes.size() - dupArgCount); + MethodType dupType = MethodType.methodType(oldType.returnType(), ptypes); + target = AdapterMethodHandle.makeDupArguments(dupType, target, dupArgPos, dupArgCount); + if (target == null) + throw newIllegalArgumentException("cannot dup"); + oldType = target.type(); + } + while (!drops.isEmpty()) { + // Grab a contiguous initial sequence of drops. + int dropArgPos = drops.get(0), dropArgCount = 1; + while (dropArgCount < drops.size()) { + int drop1 = drops.get(dropArgCount); + if (drop1 != dropArgPos + dropArgCount) break; + dropArgCount += 1; + } + //if (dropArgCount > 1) System.out.println("Drop: "+drops.subList(0, dropArgCount)); + drops.subList(0, dropArgCount).clear(); + List> dropTypes = newType.parameterList() + .subList(dropArgPos, dropArgPos + dropArgCount); + MethodType dropType = oldType.insertParameterTypes(dropArgPos, dropTypes); + target = AdapterMethodHandle.makeDropArguments(dropType, target, dropArgPos, dropArgCount); + if (target == null) throw newIllegalArgumentException("cannot drop"); + oldType = target.type(); + } + return convertArguments(target, newType, oldType, 0); + } + + /*non-public*/ static + MethodHandle convertArguments(MethodHandle target, MethodType newType, int level) { + MethodType oldType = target.type(); + if (oldType.equals(newType)) + return target; + assert(level > 1 || oldType.isConvertibleTo(newType)); + MethodHandle retFilter = null; + Class oldRT = oldType.returnType(); + Class newRT = newType.returnType(); + if (!VerifyType.isNullConversion(oldRT, newRT)) { + if (oldRT == void.class) { + Wrapper wrap = newRT.isPrimitive() ? 
Wrapper.forPrimitiveType(newRT) : Wrapper.OBJECT; + retFilter = ValueConversions.zeroConstantFunction(wrap); + } else { + retFilter = MethodHandles.identity(newRT); + retFilter = convertArguments(retFilter, retFilter.type().changeParameterType(0, oldRT), level); + } + newType = newType.changeReturnType(oldRT); + } + MethodHandle res = null; + Exception ex = null; + try { + res = convertArguments(target, newType, oldType, level); + } catch (IllegalArgumentException ex1) { + ex = ex1; + } + if (res == null) { + WrongMethodTypeException wmt = new WrongMethodTypeException("cannot convert to "+newType+": "+target); + wmt.initCause(ex); + throw wmt; + } + if (retFilter != null) + res = MethodHandles.filterReturnValue(res, retFilter); + return res; + } + + static MethodHandle convertArguments(MethodHandle target, + MethodType newType, + MethodType oldType, + int level) { + assert(oldType.parameterCount() == target.type().parameterCount()); if (newType == oldType) return target; if (oldType.parameterCount() != newType.parameterCount()) - throw newIllegalArgumentException("mismatched parameter count"); - MethodHandle res = AdapterMethodHandle.makePairwiseConvert(newType, target); + throw newIllegalArgumentException("mismatched parameter count", oldType, newType); + MethodHandle res = AdapterMethodHandle.makePairwiseConvert(newType, target, level); if (res != null) return res; + // We can come here in the case of target(int)void => (Object)void, + // because the unboxing logic for Object => int is complex. int argc = oldType.parameterCount(); + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this code is deprecated // The JVM can't do it directly, so fill in the gap with a Java adapter. // TO DO: figure out what to put here from case-by-case experience // Use a heavier method: Convert all the arguments to Object, // then back to the desired types. We might have to use Java-based // method handles to do this. 
MethodType objType = MethodType.genericMethodType(argc); - MethodHandle objTarget = AdapterMethodHandle.makePairwiseConvert(objType, target); + MethodHandle objTarget = AdapterMethodHandle.makePairwiseConvert(objType, target, level); if (objTarget == null) objTarget = FromGeneric.make(target); - res = AdapterMethodHandle.makePairwiseConvert(newType, objTarget); + res = AdapterMethodHandle.makePairwiseConvert(newType, objTarget, level); if (res != null) return res; return ToGeneric.make(newType, objTarget); } + static MethodHandle spreadArguments(MethodHandle target, Class arrayType, int arrayLength) { + MethodType oldType = target.type(); + int nargs = oldType.parameterCount(); + int keepPosArgs = nargs - arrayLength; + MethodType newType = oldType + .dropParameterTypes(keepPosArgs, nargs) + .insertParameterTypes(keepPosArgs, arrayType); + return spreadArguments(target, newType, keepPosArgs, arrayType, arrayLength); + } + static MethodHandle spreadArguments(MethodHandle target, MethodType newType, int spreadArgPos) { + int arrayLength = target.type().parameterCount() - spreadArgPos; + return spreadArguments(target, newType, spreadArgPos, Object[].class, arrayLength); + } static MethodHandle spreadArguments(MethodHandle target, MethodType newType, - int spreadArg) { + int spreadArgPos, + Class arrayType, + int arrayLength) { // TO DO: maybe allow the restarg to be Object and implicitly cast to Object[] MethodType oldType = target.type(); // spread the last argument of newType to oldType - int spreadCount = oldType.parameterCount() - spreadArg; - Class spreadArgType = Object[].class; - MethodHandle res = AdapterMethodHandle.makeSpreadArguments(newType, target, spreadArgType, spreadArg, spreadCount); - if (res != null) - return res; - // try an intermediate adapter - Class spreadType = null; - if (spreadArg < 0 || spreadArg >= newType.parameterCount() - || !VerifyType.isSpreadArgType(spreadType = newType.parameterType(spreadArg))) - throw newIllegalArgumentException("no restarg in "+newType); - Class[] ptypes = oldType.parameterArray(); - for (int i = 0; i < spreadCount; i++) - ptypes[spreadArg + i] = VerifyType.spreadArgElementType(spreadType, i); - MethodType midType = MethodType.methodType(newType.returnType(), ptypes); - // after spreading, some arguments may need further conversion - MethodHandle target2 = convertArguments(target, midType, oldType, null); - if (target2 == null) - throw new UnsupportedOperationException("NYI: convert "+midType+" =calls=> "+oldType); - res = AdapterMethodHandle.makeSpreadArguments(newType, target2, spreadArgType, spreadArg, spreadCount); - if (res != null) - return res; - res = SpreadGeneric.make(target2, spreadCount); - if (res != null) - res = convertArguments(res, newType, res.type(), null); + assert(arrayLength == oldType.parameterCount() - spreadArgPos); + assert(newType.parameterType(spreadArgPos) == arrayType); + MethodHandle res = AdapterMethodHandle.makeSpreadArguments(newType, target, arrayType, spreadArgPos, arrayLength); + if (res == null) throw new IllegalArgumentException("spread on "+target+" with "+arrayType.getSimpleName()); return res; } + static MethodHandle collectArguments(MethodHandle target, + int collectArg, + MethodHandle collector) { + MethodType type = target.type(); + Class collectType = collector.type().returnType(); + if (collectType != type.parameterType(collectArg)) + target = target.asType(type.changeParameterType(collectArg, collectType)); + MethodType newType = type + .dropParameterTypes(collectArg, collectArg+1) + 
.insertParameterTypes(collectArg, collector.type().parameterArray()); + return collectArguments(target, newType, collectArg, collector); + } static MethodHandle collectArguments(MethodHandle target, MethodType newType, int collectArg, MethodHandle collector) { MethodType oldType = target.type(); // (a...,c)=>r - if (collector == null) { - int numCollect = newType.parameterCount() - oldType.parameterCount() + 1; - collector = ValueConversions.varargsArray(numCollect); - } // newType // (a..., b...)=>r MethodType colType = collector.type(); // (b...)=>c // oldType // (a..., b...)=>r assert(newType.parameterCount() == collectArg + colType.parameterCount()); assert(oldType.parameterCount() == collectArg + 1); - MethodHandle gtarget = convertArguments(target, oldType.generic(), oldType, null); - MethodHandle gcollector = convertArguments(collector, colType.generic(), colType, null); - if (gtarget == null || gcollector == null) return null; - MethodHandle gresult = FilterGeneric.makeArgumentCollector(gcollector, gtarget); - MethodHandle result = convertArguments(gresult, newType, gresult.type(), null); + MethodHandle result = null; + if (AdapterMethodHandle.canCollectArguments(oldType, colType, collectArg, false)) { + result = AdapterMethodHandle.makeCollectArguments(target, collector, collectArg, false); + } + if (result == null) { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this code is deprecated + MethodHandle gtarget = convertArguments(target, oldType.generic(), oldType, 0); + MethodHandle gcollector = convertArguments(collector, colType.generic(), colType, 0); + if (gtarget == null || gcollector == null) return null; + MethodHandle gresult = FilterGeneric.makeArgumentCollector(gcollector, gtarget); + result = convertArguments(gresult, newType, gresult.type(), 0); + } return result; } static MethodHandle filterArgument(MethodHandle target, - int pos, - MethodHandle filter) { - MethodType ttype = target.type(), gttype = ttype.generic(); + int pos, + MethodHandle filter) { + MethodType ttype = target.type(); + MethodType ftype = filter.type(); + assert(ftype.parameterCount() == 1); + MethodType rtype = ttype.changeParameterType(pos, ftype.parameterType(0)); + MethodType gttype = ttype.generic(); if (ttype != gttype) { - target = convertArguments(target, gttype, ttype, null); + target = convertArguments(target, gttype, ttype, 0); ttype = gttype; } - MethodType ftype = filter.type(), gftype = ftype.generic(); - if (ftype.parameterCount() != 1) - throw new InternalError(); + MethodType gftype = ftype.generic(); if (ftype != gftype) { - filter = convertArguments(filter, gftype, ftype, null); + filter = convertArguments(filter, gftype, ftype, 0); ftype = gftype; } - if (ftype == ttype) { - // simple unary case - return FilterOneArgument.make(filter, target); + MethodHandle result = null; + if (AdapterMethodHandle.canCollectArguments(ttype, ftype, pos, false)) { + result = AdapterMethodHandle.makeCollectArguments(target, filter, pos, false); } - return FilterGeneric.makeArgumentFilter(pos, filter, target); + if (result == null) { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this code is deprecated + if (ftype == ttype) { + // simple unary case + result = FilterOneArgument.make(filter, target); + } else { + result = FilterGeneric.makeArgumentFilter(pos, filter, target); + } + } + if (result.type() != rtype) + result = result.asType(rtype); + return result; } static MethodHandle foldArguments(MethodHandle target, - MethodType newType, - MethodHandle 
combiner) { + MethodType newType, + int foldPos, + MethodHandle combiner) { MethodType oldType = target.type(); MethodType ctype = combiner.type(); - MethodHandle gtarget = convertArguments(target, oldType.generic(), oldType, null); - MethodHandle gcombiner = convertArguments(combiner, ctype.generic(), ctype, null); + if (AdapterMethodHandle.canCollectArguments(oldType, ctype, foldPos, true)) { + MethodHandle res = AdapterMethodHandle.makeCollectArguments(target, combiner, foldPos, true); + if (res != null) return res; + } + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this code is deprecated + if (foldPos != 0) return null; + MethodHandle gtarget = convertArguments(target, oldType.generic(), oldType, 0); + MethodHandle gcombiner = convertArguments(combiner, ctype.generic(), ctype, 0); + if (ctype.returnType() == void.class) { + gtarget = dropArguments(gtarget, oldType.generic().insertParameterTypes(foldPos, Object.class), foldPos); + } if (gtarget == null || gcombiner == null) return null; MethodHandle gresult = FilterGeneric.makeArgumentFolder(gcombiner, gtarget); - MethodHandle result = convertArguments(gresult, newType, gresult.type(), null); - return result; + return convertArguments(gresult, newType, gresult.type(), 0); } static @@ -802,6 +879,7 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; this.target = target; this.fallback = fallback; } + // FIXME: Build the control flow out of foldArguments. static MethodHandle make(MethodHandle test, MethodHandle target, MethodHandle fallback) { MethodType type = target.type(); int nargs = type.parameterCount(); @@ -809,12 +887,12 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; MethodHandle invoke = INVOKES[nargs]; MethodType gtype = type.generic(); assert(invoke.type().dropParameterTypes(0,1) == gtype); - MethodHandle gtest = convertArguments(test, gtype.changeReturnType(boolean.class), test.type(), null); - MethodHandle gtarget = convertArguments(target, gtype, type, null); - MethodHandle gfallback = convertArguments(fallback, gtype, type, null); + MethodHandle gtest = convertArguments(test, gtype.changeReturnType(boolean.class), test.type(), 0); + MethodHandle gtarget = convertArguments(target, gtype, type, 0); + MethodHandle gfallback = convertArguments(fallback, gtype, type, 0); if (gtest == null || gtarget == null || gfallback == null) return null; MethodHandle gguard = new GuardWithTest(invoke, gtest, gtarget, gfallback); - return convertArguments(gguard, type, gtype, null); + return convertArguments(gguard, type, gtype, 0); } else { MethodHandle invoke = VARARGS_INVOKE; MethodType gtype = MethodType.genericMethodType(1); @@ -925,8 +1003,9 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; GuardWithCatch(MethodHandle target, Class exType, MethodHandle catcher) { this(INVOKES[target.type().parameterCount()], target, exType, catcher); } - GuardWithCatch(MethodHandle invoker, - MethodHandle target, Class exType, MethodHandle catcher) { + // FIXME: Build the control flow out of foldArguments. 
+ GuardWithCatch(MethodHandle invoker, + MethodHandle target, Class exType, MethodHandle catcher) { super(invoker); this.target = target; this.exType = exType; @@ -1057,11 +1136,11 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; if (nargs < GuardWithCatch.INVOKES.length) { MethodType gtype = type.generic(); MethodType gcatchType = gtype.insertParameterTypes(0, Throwable.class); - MethodHandle gtarget = convertArguments(target, gtype, type, null); - MethodHandle gcatcher = convertArguments(catcher, gcatchType, ctype, null); + MethodHandle gtarget = convertArguments(target, gtype, type, 0); + MethodHandle gcatcher = convertArguments(catcher, gcatchType, ctype, 0); MethodHandle gguard = new GuardWithCatch(gtarget, exType, gcatcher); if (gtarget == null || gcatcher == null || gguard == null) return null; - return convertArguments(gguard, type, gtype, null); + return convertArguments(gguard, type, gtype, 0); } else { MethodType gtype = MethodType.genericMethodType(0, true); MethodType gcatchType = gtype.insertParameterTypes(0, Throwable.class); diff --git a/jdk/src/share/classes/java/lang/invoke/MethodHandleNatives.java b/jdk/src/share/classes/java/lang/invoke/MethodHandleNatives.java index 82d5d898638..40272644e28 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodHandleNatives.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodHandleNatives.java @@ -115,6 +115,8 @@ class MethodHandleNatives { /** Which conv-ops are implemented by the JVM? */ static final int CONV_OP_IMPLEMENTED_MASK; + /** Derived mode flag. Only false on some old JVM implementations. */ + static final boolean HAVE_RICOCHET_FRAMES; private static native void registerNatives(); static { @@ -141,6 +143,7 @@ class MethodHandleNatives { if (CONV_OP_IMPLEMENTED_MASK_ == 0) CONV_OP_IMPLEMENTED_MASK_ = DEFAULT_CONV_OP_IMPLEMENTED_MASK; CONV_OP_IMPLEMENTED_MASK = CONV_OP_IMPLEMENTED_MASK_; + HAVE_RICOCHET_FRAMES = (CONV_OP_IMPLEMENTED_MASK & (1<int, Object->T) OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive - OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper (NYI) + OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg) OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg) OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS) OP_DROP_ARGS = 0x9, // remove one or more argument slots - OP_COLLECT_ARGS = 0xA, // combine one or more arguments into a varargs (NYI) + OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size) - OP_FLYBY = 0xC, // operate first on reified argument list (NYI) - OP_RICOCHET = 0xD, // run an adapter chain on the return value (NYI) + OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result + //OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists CONV_OP_LIMIT = 0xE; // limit of CONV_OP enumeration /** Shift and mask values for decoding the AMH.conversion field. * These numbers are shared with the JVM for creating AMHs. 
*/ static final int CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field + CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK @@ -244,8 +248,9 @@ class MethodHandleNatives { T_LONG = 11, T_OBJECT = 12, //T_ARRAY = 13 - T_VOID = 14; + T_VOID = 14, //T_ADDRESS = 15 + T_ILLEGAL = 99; /** * Constant pool reference-kind codes, as used by CONSTANT_MethodHandle CP entries. @@ -273,16 +278,29 @@ class MethodHandleNatives { try { Field con = Constants.class.getDeclaredField(name); int jval = con.getInt(null); - if (jval != vmval) - throw new InternalError(name+": JVM has "+vmval+" while Java has "+jval); + if (jval == vmval) continue; + String err = (name+": JVM has "+vmval+" while Java has "+jval); + if (name.equals("CONV_OP_LIMIT")) { + System.err.println("warning: "+err); + continue; + } + throw new InternalError(err); } catch (Exception ex) { + if (ex instanceof NoSuchFieldException) { + String err = (name+": JVM has "+vmval+" which Java does not define"); + // ignore exotic ops the JVM cares about; we just wont issue them + if (name.startsWith("OP_") || name.startsWith("GC_")) { + System.err.println("warning: "+err); + continue; + } + } throw new InternalError(name+": access failed, got "+ex); } } return true; } static { - verifyConstants(); + assert(verifyConstants()); } // Up-calls from the JVM. @@ -313,7 +331,7 @@ class MethodHandleNatives { } /** - * The JVM wants to use a MethodType with invokeGeneric. Give the runtime fair warning. + * The JVM wants to use a MethodType with inexact invoke. Give the runtime fair warning. */ static void notifyGenericMethodType(MethodType type) { type.form().notifyGenericMethodType(); @@ -323,15 +341,39 @@ class MethodHandleNatives { * The JVM wants to raise an exception. Here's the path. */ static void raiseException(int code, Object actual, Object required) { - String message; - // disregard the identity of the actual object, if it is not a class: - if (!(actual instanceof Class) && !(actual instanceof MethodType)) - actual = actual.getClass(); - if (actual != null) - message = "required "+required+" but encountered "+actual; - else - message = "required "+required; + String message = null; switch (code) { + case 190: // arraylength + try { + String reqLength = ""; + if (required instanceof AdapterMethodHandle) { + int conv = ((AdapterMethodHandle)required).getConversion(); + int spChange = AdapterMethodHandle.extractStackMove(conv); + reqLength = " of length "+(spChange+1); + } + int actualLength = actual == null ? 
0 : java.lang.reflect.Array.getLength(actual); + message = "required array"+reqLength+", but encountered wrong length "+actualLength; + break; + } catch (IllegalArgumentException ex) { + } + required = Object[].class; // should have been an array + code = 192; // checkcast + break; + } + // disregard the identity of the actual object, if it is not a class: + if (message == null) { + if (!(actual instanceof Class) && !(actual instanceof MethodType)) + actual = actual.getClass(); + if (actual != null) + message = "required "+required+" but encountered "+actual; + else + message = "required "+required; + } + switch (code) { + case 190: // arraylength + throw new ArrayIndexOutOfBoundsException(message); + case 50: //_aaload + throw new ClassCastException(message); case 192: // checkcast throw new ClassCastException(message); default: @@ -365,4 +407,13 @@ class MethodHandleNatives { throw err; } } + + /** + * This assertion marks code which was written before ricochet frames were implemented. + * Such code will go away when the ports catch up. + */ + static boolean workaroundWithoutRicochetFrames() { + assert(!HAVE_RICOCHET_FRAMES) : "this code should not be executed if `-XX:+UseRicochetFrames is enabled"; + return true; + } } diff --git a/jdk/src/share/classes/java/lang/invoke/MethodHandleStatics.java b/jdk/src/share/classes/java/lang/invoke/MethodHandleStatics.java index 6ea8f978b63..3eeeec71ac3 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodHandleStatics.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodHandleStatics.java @@ -63,8 +63,17 @@ package java.lang.invoke; } static void checkSpreadArgument(Object av, int n) { - if (av == null ? n != 0 : ((Object[])av).length != n) - throw newIllegalArgumentException("Array is not of length "+n); + if (av == null) { + if (n == 0) return; + } else if (av instanceof Object[]) { + int len = ((Object[])av).length; + if (len == n) return; + } else { + int len = java.lang.reflect.Array.getLength(av); + if (len == n) return; + } + // fall through to error: + throw newIllegalArgumentException("Array is not of length "+n); } // handy shared exception makers (they simplify the common case code) @@ -80,6 +89,9 @@ package java.lang.invoke; /*non-public*/ static RuntimeException newIllegalArgumentException(String message, Object obj) { return new IllegalArgumentException(message(message, obj)); } + /*non-public*/ static RuntimeException newIllegalArgumentException(String message, Object obj, Object obj2) { + return new IllegalArgumentException(message(message, obj, obj2)); + } /*non-public*/ static Error uncaughtException(Exception ex) { Error err = new InternalError("uncaught exception"); err.initCause(ex); @@ -89,4 +101,8 @@ package java.lang.invoke; if (obj != null) message = message + ": " + obj; return message; } + private static String message(String message, Object obj, Object obj2) { + if (obj != null || obj2 != null) message = message + ": " + obj + ", " + obj2; + return message; + } } diff --git a/jdk/src/share/classes/java/lang/invoke/MethodHandles.java b/jdk/src/share/classes/java/lang/invoke/MethodHandles.java index 12e36da623a..99985a09fee 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodHandles.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodHandles.java @@ -190,7 +190,7 @@ public class MethodHandles { * is not symbolically accessible from the lookup class's loader, * the lookup can still succeed. 
* For example, lookups for {@code MethodHandle.invokeExact} and - * {@code MethodHandle.invokeGeneric} will always succeed, regardless of requested type. + * {@code MethodHandle.invoke} will always succeed, regardless of requested type. *
    • If there is a security manager installed, it can forbid the lookup * on various grounds (see below). * By contrast, the {@code ldc} instruction is not subject to @@ -590,10 +590,10 @@ public class MethodHandles { * Because of the general equivalence between {@code invokevirtual} * instructions and method handles produced by {@code findVirtual}, * if the class is {@code MethodHandle} and the name string is - * {@code invokeExact} or {@code invokeGeneric}, the resulting + * {@code invokeExact} or {@code invoke}, the resulting * method handle is equivalent to one produced by * {@link java.lang.invoke.MethodHandles#exactInvoker MethodHandles.exactInvoker} or - * {@link java.lang.invoke.MethodHandles#genericInvoker MethodHandles.genericInvoker} + * {@link java.lang.invoke.MethodHandles#invoker MethodHandles.invoker} * with the same {@code type} argument. * * @param refc the class or interface from which the method is accessed @@ -1080,7 +1080,7 @@ return mh1; MethodType rawType = mh.type(); if (rawType.parameterType(0) == caller) return mh; MethodType narrowType = rawType.changeParameterType(0, caller); - MethodHandle narrowMH = MethodHandleImpl.convertArguments(mh, narrowType, rawType, null); + MethodHandle narrowMH = MethodHandleImpl.convertArguments(mh, narrowType, rawType, 0); return fixVarargs(narrowMH, mh); } @@ -1148,7 +1148,7 @@ return mh1; *
    • an {@code Object[]} array containing more arguments *
    *

    - * The invoker will behave like a call to {@link MethodHandle#invokeGeneric invokeGeneric} with + * The invoker will behave like a call to {@link MethodHandle#invoke invoke} with * the indicated {@code type}. * That is, if the target is exactly of the given {@code type}, it will behave * like {@code invokeExact}; otherwise it behave as if {@link MethodHandle#asType asType} @@ -1166,7 +1166,7 @@ return mh1; *

    * This method is equivalent to the following code (though it may be more efficient): *

    -MethodHandle invoker = MethodHandles.genericInvoker(type);
    +MethodHandle invoker = MethodHandles.invoker(type);
     int spreadArgCount = type.parameterCount - objectArgCount;
     invoker = invoker.asSpreader(Object[].class, spreadArgCount);
     return invoker;
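For illustration, a minimal runnable sketch of the equivalent code quoted above under the renamed API (the class name and the String.concat target are arbitrary choices, not part of this change):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class SpreadInvokeSketch {
        public static void main(String[] args) throws Throwable {
            // Arbitrary target: String.concat(String), of type (String,String)String.
            MethodHandle concat = MethodHandles.lookup().findVirtual(
                    String.class, "concat",
                    MethodType.methodType(String.class, String.class));

            // As in the equivalent code above: a general invoker for the target's
            // type, respelled by asSpreader to take its two arguments as an Object[].
            MethodHandle invoker = MethodHandles.invoker(concat.type())
                    .asSpreader(Object[].class, 2);

            // The leading argument is the target handle itself.
            Object result = invoker.invoke(concat, new Object[] { "foo", "bar" });
            System.out.println(result);   // prints foobar
        }
    }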
    @@ -1186,7 +1186,7 @@ return invoker;
     
         /**
          * Produces a special invoker method handle which can be used to
    -     * invoke any method handle of the given type, as if by {@code invokeExact}.
    +     * invoke any method handle of the given type, as if by {@link MethodHandle#invokeExact invokeExact}.
          * The resulting invoker will have a type which is
          * exactly equal to the desired type, except that it will accept
          * an additional leading argument of type {@code MethodHandle}.
    @@ -1203,7 +1203,7 @@ publicLookup().findVirtual(MethodHandle.class, "invokeExact", type)
          * For example, to emulate an {@code invokeExact} call to a variable method
          * handle {@code M}, extract its type {@code T},
          * look up the invoker method {@code X} for {@code T},
    -     * and call the invoker method, as {@code X.invokeGeneric(T, A...)}.
    +     * and call the invoker method, as {@code X.invoke(T, A...)}.
          * (It would not work to call {@code X.invokeExact}, since the type {@code T}
          * is unknown.)
          * If spreading, collecting, or other argument transformations are required,
    @@ -1212,7 +1212,7 @@ publicLookup().findVirtual(MethodHandle.class, "invokeExact", type)
          * 

    * (Note: The invoker method is not available via the Core Reflection API. * An attempt to call {@linkplain java.lang.reflect.Method#invoke Method.invoke} - * on the declared {@code invokeExact} or {@code invokeGeneric} method will raise an + * on the declared {@code invokeExact} or {@code invoke} method will raise an * {@link java.lang.UnsupportedOperationException UnsupportedOperationException}.) *

    * This method throws no reflective or security exceptions. @@ -1226,20 +1226,20 @@ publicLookup().findVirtual(MethodHandle.class, "invokeExact", type) /** * Produces a special invoker method handle which can be used to - * invoke any method handle of the given type, as if by {@code invokeGeneric}. + * invoke any method handle compatible with the given type, as if by {@link MethodHandle#invoke invoke}. * The resulting invoker will have a type which is * exactly equal to the desired type, except that it will accept * an additional leading argument of type {@code MethodHandle}. *

    * Before invoking its target, the invoker will apply reference casts as - * necessary and unbox and widen primitive arguments, as if by {@link #convertArguments convertArguments}. - * The return value of the invoker will be an {@code Object} reference, - * boxing a primitive value if the original type returns a primitive, - * and always null if the original type returns void. + * necessary and box, unbox, or widen primitive values, as if by {@link MethodHandle#asType asType}. + * Similarly, the return value will be converted as necessary. + * If the target is a {@linkplain MethodHandle#asVarargsCollector variable arity method handle}, + * the required arity conversion will be made, again as if by {@link MethodHandle#asType asType}. *

    * This method is equivalent to the following code (though it may be more efficient): *

    -publicLookup().findVirtual(MethodHandle.class, "invokeGeneric", type)
    +publicLookup().findVirtual(MethodHandle.class, "invoke", type)
          * 
    *
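A hedged sketch of the renamed general invoker in use (the String.concat target is an arbitrary example); unlike exactInvoker, the handle returned by invoker applies asType conversions before reaching the target:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class InvokerSketch {
        public static void main(String[] args) throws Throwable {
            MethodHandle concat = MethodHandles.lookup().findVirtual(
                    String.class, "concat",
                    MethodType.methodType(String.class, String.class));

            // An invoker for the erased type (Object,Object)Object; the target's
            // (String,String)String type is adapted as if by asType.
            MethodHandle invoker = MethodHandles.invoker(MethodType.genericMethodType(2));

            Object out = invoker.invokeExact(concat, (Object) "foo", (Object) "bar");
            System.out.println(out);   // prints foobar
        }
    }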

    * This method throws no reflective or security exceptions. @@ -1247,8 +1247,17 @@ publicLookup().findVirtual(MethodHandle.class, "invokeGeneric", type) * @return a method handle suitable for invoking any method handle convertible to the given type */ static public + MethodHandle invoker(MethodType type) { + return type.invokers().generalInvoker(); + } + + /** + * Temporary alias for {@link #invoker}, for backward compatibility with some versions of JSR 292. + * @deprecated Will be removed for JSR 292 Proposed Final Draft. + */ + public static MethodHandle genericInvoker(MethodType type) { - return type.invokers().genericInvoker(); + return invoker(type); } /** @@ -1368,18 +1377,7 @@ publicLookup().findVirtual(MethodHandle.class, "invokeGeneric", type) */ public static MethodHandle convertArguments(MethodHandle target, MethodType newType) { - MethodType oldType = target.type(); - if (oldType.equals(newType)) - return target; - MethodHandle res = null; - try { - res = MethodHandleImpl.convertArguments(target, - newType, oldType, null); - } catch (IllegalArgumentException ex) { - } - if (res == null) - throw new WrongMethodTypeException("cannot convert to "+newType+": "+target); - return res; + return MethodHandleImpl.convertArguments(target, newType, 1); } /** @@ -1422,7 +1420,7 @@ publicLookup().findVirtual(MethodHandle.class, "invokeGeneric", type) */ public static MethodHandle explicitCastArguments(MethodHandle target, MethodType newType) { - return convertArguments(target, newType); // FIXME! + return MethodHandleImpl.convertArguments(target, newType, 2); } /* @@ -1517,23 +1515,32 @@ assert((int)twice.invokeExact(21) == 42); MethodHandle permuteArguments(MethodHandle target, MethodType newType, int... reorder) { MethodType oldType = target.type(); checkReorder(reorder, newType, oldType); - return MethodHandleImpl.convertArguments(target, + return MethodHandleImpl.permuteArguments(target, newType, oldType, reorder); } private static void checkReorder(int[] reorder, MethodType newType, MethodType oldType) { + if (newType.returnType() != oldType.returnType()) + throw newIllegalArgumentException("return types do not match", + oldType, newType); if (reorder.length == oldType.parameterCount()) { int limit = newType.parameterCount(); boolean bad = false; - for (int i : reorder) { + for (int j = 0; j < reorder.length; j++) { + int i = reorder[j]; if (i < 0 || i >= limit) { bad = true; break; } + Class src = newType.parameterType(i); + Class dst = oldType.parameterType(j); + if (src != dst) + throw newIllegalArgumentException("parameter types do not match after reorder", + oldType, newType); } if (!bad) return; } - throw newIllegalArgumentException("bad reorder array"); + throw newIllegalArgumentException("bad reorder array: "+Arrays.toString(reorder)); } /** @@ -1622,7 +1629,7 @@ assert((int)twice.invokeExact(21) == 42); if (type == void.class) throw newIllegalArgumentException("void type"); Wrapper w = Wrapper.forPrimitiveType(type); - return identity(type).bindTo(w.convert(value, type)); + return insertArguments(identity(type), 0, w.convert(value, type)); } else { return identity(type).bindTo(type.cast(value)); } @@ -1857,7 +1864,8 @@ assertEquals("XY", (String) f2.invokeExact("x", "y")); // XY MethodHandle filterArguments(MethodHandle target, int pos, MethodHandle... 
filters) { MethodType targetType = target.type(); MethodHandle adapter = target; - MethodType adapterType = targetType; + MethodType adapterType = null; + assert((adapterType = targetType) != null); int maxPos = targetType.parameterCount(); if (pos + filters.length > maxPos) throw newIllegalArgumentException("too many filters"); @@ -1865,19 +1873,23 @@ assertEquals("XY", (String) f2.invokeExact("x", "y")); // XY for (MethodHandle filter : filters) { curPos += 1; if (filter == null) continue; // ignore null elements of filters - MethodType filterType = filter.type(); - if (filterType.parameterCount() != 1 - || filterType.returnType() != targetType.parameterType(curPos)) - throw newIllegalArgumentException("target and filter types do not match"); - adapterType = adapterType.changeParameterType(curPos, filterType.parameterType(0)); - adapter = MethodHandleImpl.filterArgument(adapter, curPos, filter); + adapter = filterArgument(adapter, curPos, filter); + assert((adapterType = adapterType.changeParameterType(curPos, filter.type().parameterType(0))) != null); } - MethodType midType = adapter.type(); - if (midType != adapterType) - adapter = MethodHandleImpl.convertArguments(adapter, adapterType, midType, null); + assert(adapterType.equals(adapter.type())); return adapter; } + /*non-public*/ static + MethodHandle filterArgument(MethodHandle target, int pos, MethodHandle filter) { + MethodType targetType = target.type(); + MethodType filterType = filter.type(); + if (filterType.parameterCount() != 1 + || filterType.returnType() != targetType.parameterType(pos)) + throw newIllegalArgumentException("target and filter types do not match", targetType, filterType); + return MethodHandleImpl.filterArgument(target, pos, filter); + } + /** * Adapts a target method handle {@code target} by post-processing * its return value with a unary filter function. @@ -1913,14 +1925,26 @@ System.out.println((int) f0.invokeExact("x", "y")); // 2 MethodHandle filterReturnValue(MethodHandle target, MethodHandle filter) { MethodType targetType = target.type(); MethodType filterType = filter.type(); - if (filterType.parameterCount() != 1 - || filterType.parameterType(0) != targetType.returnType()) - throw newIllegalArgumentException("target and filter types do not match"); + Class rtype = targetType.returnType(); + int filterValues = filterType.parameterCount(); + if (filterValues == 0 + ? (rtype != void.class) + : (rtype != filterType.parameterType(0))) + throw newIllegalArgumentException("target and filter types do not match", target, filter); // result = fold( lambda(retval, arg...) { filter(retval) }, // lambda( arg...) { target(arg...) } ) + MethodType newType = targetType.changeReturnType(filterType.returnType()); + MethodHandle result = null; + if (AdapterMethodHandle.canCollectArguments(filterType, targetType, 0, false)) { + result = AdapterMethodHandle.makeCollectArguments(filter, target, 0, false); + if (result != null) return result; + } // FIXME: Too many nodes here. 
- MethodHandle returner = dropArguments(filter, 1, targetType.parameterList()); - return foldArguments(returner, target); + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + MethodHandle returner = dropArguments(filter, filterValues, targetType.parameterList()); + result = foldArguments(returner, target); + assert(result.type().equals(newType)); + return result; } /** @@ -1972,16 +1996,23 @@ System.out.println((int) f0.invokeExact("x", "y")); // 2 MethodHandle foldArguments(MethodHandle target, MethodHandle combiner) { MethodType targetType = target.type(); MethodType combinerType = combiner.type(); + int foldPos = 0; // always at the head, at present int foldArgs = combinerType.parameterCount(); - boolean ok = (targetType.parameterCount() >= 1 + foldArgs); - if (ok && !combinerType.parameterList().equals(targetType.parameterList().subList(1, foldArgs+1))) + int foldVals = combinerType.returnType() == void.class ? 0 : 1; + int afterInsertPos = foldPos + foldVals; + boolean ok = (targetType.parameterCount() >= afterInsertPos + foldArgs); + if (ok && !(combinerType.parameterList() + .equals(targetType.parameterList().subList(afterInsertPos, + afterInsertPos + foldArgs)))) ok = false; - if (ok && !combinerType.returnType().equals(targetType.parameterType(0))) + if (ok && foldVals != 0 && !combinerType.returnType().equals(targetType.parameterType(0))) ok = false; if (!ok) throw misMatchedTypes("target and combiner types", targetType, combinerType); - MethodType newType = targetType.dropParameterTypes(0, 1); - return MethodHandleImpl.foldArguments(target, newType, combiner); + MethodType newType = targetType.dropParameterTypes(foldPos, afterInsertPos); + MethodHandle res = MethodHandleImpl.foldArguments(target, newType, foldPos, combiner); + if (res == null) throw newIllegalArgumentException("cannot fold from "+newType+" to " +targetType); + return res; } /** @@ -2142,7 +2173,7 @@ System.out.println((int) f0.invokeExact("x", "y")); // 2 * the given {@code target} on the incoming arguments, * and returning or throwing whatever the {@code target} * returns or throws. The invocation will be as if by - * {@code target.invokeGeneric}. + * {@code target.invoke}. * The target's type will be checked before the * instance is created, as if by a call to {@code asType}, * which may result in a {@code WrongMethodTypeException}. diff --git a/jdk/src/share/classes/java/lang/invoke/MethodType.java b/jdk/src/share/classes/java/lang/invoke/MethodType.java index a7fa147fa32..02c12c69aad 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodType.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodType.java @@ -25,6 +25,7 @@ package java.lang.invoke; +import sun.invoke.util.Wrapper; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -39,7 +40,7 @@ import static java.lang.invoke.MethodHandleStatics.*; * matched between a method handle and all its callers, * and the JVM's operations enforce this matching at, specifically * during calls to {@link MethodHandle#invokeExact MethodHandle.invokeExact} - * and {@link MethodHandle#invokeGeneric MethodHandle.invokeGeneric}, and during execution + * and {@link MethodHandle#invoke MethodHandle.invoke}, and during execution * of {@code invokedynamic} instructions. *

    * The structure is a return type accompanied by any number of parameter types. @@ -262,18 +263,18 @@ class MethodType implements java.io.Serializable { * Finds or creates a method type whose components are {@code Object} with an optional trailing {@code Object[]} array. * Convenience method for {@link #methodType(java.lang.Class, java.lang.Class[]) methodType}. * All parameters and the return type will be {@code Object}, - * except the final varargs parameter if any, which will be {@code Object[]}. - * @param objectArgCount number of parameters (excluding the varargs parameter if any) - * @param varargs whether there will be a varargs parameter, of type {@code Object[]} - * @return a totally generic method type, given only its count of parameters and varargs - * @throws IllegalArgumentException if {@code objectArgCount} is negative or greater than 255 + * except the final array parameter if any, which will be {@code Object[]}. + * @param objectArgCount number of parameters (excluding the final array parameter if any) + * @param finalArray whether there will be a trailing array parameter, of type {@code Object[]} + * @return a generally applicable method type, for all calls of the given fixed argument count and a collected array of further arguments + * @throws IllegalArgumentException if {@code objectArgCount} is negative or greater than 255 (or 254, if {@code finalArray}) * @see #genericMethodType(int) */ public static - MethodType genericMethodType(int objectArgCount, boolean varargs) { + MethodType genericMethodType(int objectArgCount, boolean finalArray) { MethodType mt; checkSlotCount(objectArgCount); - int ivarargs = (!varargs ? 0 : 1); + int ivarargs = (!finalArray ? 0 : 1); int ootIndex = objectArgCount*2 + ivarargs; if (ootIndex < objectOnlyTypes.length) { mt = objectOnlyTypes[ootIndex]; @@ -294,7 +295,7 @@ class MethodType implements java.io.Serializable { * Convenience method for {@link #methodType(java.lang.Class, java.lang.Class[]) methodType}. * All parameters and the return type will be Object. 
* @param objectArgCount number of parameters - * @return a totally generic method type, given only its count of parameters + * @return a generally applicable method type, for all calls of the given argument count * @throws IllegalArgumentException if {@code objectArgCount} is negative or greater than 255 * @see #genericMethodType(int, boolean) */ @@ -626,6 +627,30 @@ class MethodType implements java.io.Serializable { return sb.toString(); } + + /*non-public*/ + boolean isConvertibleTo(MethodType newType) { + if (!canConvert(returnType(), newType.returnType())) + return false; + int argc = parameterCount(); + if (argc != newType.parameterCount()) + return false; + for (int i = 0; i < argc; i++) { + if (!canConvert(newType.parameterType(i), parameterType(i))) + return false; + } + return true; + } + private static boolean canConvert(Class src, Class dst) { + if (src == dst || dst == void.class) return true; + if (src.isPrimitive() && dst.isPrimitive()) { + if (!Wrapper.forPrimitiveType(dst) + .isConvertibleFrom(Wrapper.forPrimitiveType(src))) + return false; + } + return true; + } + /// Queries which have to do with the bytecode architecture /** Reports the number of JVM stack slots required to invoke a method diff --git a/jdk/src/share/classes/java/lang/invoke/MethodTypeForm.java b/jdk/src/share/classes/java/lang/invoke/MethodTypeForm.java index db73b24d067..6ad4a5a8eea 100644 --- a/jdk/src/share/classes/java/lang/invoke/MethodTypeForm.java +++ b/jdk/src/share/classes/java/lang/invoke/MethodTypeForm.java @@ -46,6 +46,7 @@ class MethodTypeForm { final long argCounts; // packed slot & value counts final long primCounts; // packed prim & double counts final int vmslots; // total number of parameter slots + private Object vmlayout; // vm-specific information for calls final MethodType erasedType; // the canonical erasure /*lazy*/ MethodType primsAsBoxes; // replace prims by wrappers @@ -59,7 +60,7 @@ class MethodTypeForm { /*lazy*/ FromGeneric fromGeneric; // convert cs. w/o prims to with /*lazy*/ SpreadGeneric[] spreadGeneric; // expand one argument to many /*lazy*/ FilterGeneric filterGeneric; // convert argument(s) on the fly - /*lazy*/ MethodHandle genericInvoker; // hook for invokeGeneric + /*lazy*/ MethodHandle genericInvoker; // hook for inexact invoke public MethodType erasedType() { return erasedType; @@ -460,9 +461,9 @@ class MethodTypeForm { if (genericInvoker != null) return; try { // Trigger adapter creation. - genericInvoker = InvokeGeneric.genericInvokerOf(erasedType); + genericInvoker = InvokeGeneric.generalInvokerOf(erasedType); } catch (Exception ex) { - Error err = new InternalError("Exception while resolving invokeGeneric"); + Error err = new InternalError("Exception while resolving inexact invoke"); err.initCause(ex); throw err; } diff --git a/jdk/src/share/classes/java/lang/invoke/SpreadGeneric.java b/jdk/src/share/classes/java/lang/invoke/SpreadGeneric.java index a862723e025..be160667381 100644 --- a/jdk/src/share/classes/java/lang/invoke/SpreadGeneric.java +++ b/jdk/src/share/classes/java/lang/invoke/SpreadGeneric.java @@ -66,6 +66,10 @@ class SpreadGeneric { this.entryPoint = ep[0]; } + static { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + } + /** From targetType remove the last spreadCount arguments, and instead * append a simple Object argument. 
*/ diff --git a/jdk/src/share/classes/java/lang/invoke/ToGeneric.java b/jdk/src/share/classes/java/lang/invoke/ToGeneric.java index 22723975d5b..2d46a3910e1 100644 --- a/jdk/src/share/classes/java/lang/invoke/ToGeneric.java +++ b/jdk/src/share/classes/java/lang/invoke/ToGeneric.java @@ -96,7 +96,7 @@ class ToGeneric { ToGeneric va2 = ToGeneric.of(primsAtEnd); this.adapter = va2.adapter; if (true) throw new UnsupportedOperationException("NYI: primitive parameters must follow references; entryType = "+entryType); - this.entryPoint = MethodHandleImpl.convertArguments( + this.entryPoint = MethodHandleImpl.permuteArguments( va2.entryPoint, primsAtEnd, entryType, primsAtEndOrder); // example: for entryType of (int,Object,Object), the reordered // type is (Object,Object,int) and the order is {1,2,0}, @@ -128,7 +128,7 @@ class ToGeneric { assert(eptWithInts.parameterType(i) == int.class); MethodType nextType = midType.changeParameterType(i, int.class); rawEntryPoint = MethodHandleImpl.convertArguments( - rawEntryPoint, nextType, midType, null); + rawEntryPoint, nextType, midType, 0); midType = nextType; } } @@ -152,6 +152,10 @@ class ToGeneric { this.invoker = makeRawArgumentFilter(invoker0, rawEntryTypeInit, entryType); } + static { + assert(MethodHandleNatives.workaroundWithoutRicochetFrames()); // this class is deprecated + } + /** A generic argument list will be created by a call of type 'raw'. * The values need to be reboxed for to match 'cooked'. * Do this on the fly. @@ -171,7 +175,7 @@ class ToGeneric { invoker.type().generic(), invoker, 0, MethodHandle.class); if (filteredInvoker == null) throw new UnsupportedOperationException("NYI"); } - MethodHandle reboxer = ValueConversions.rebox(dst, false); + MethodHandle reboxer = ValueConversions.rebox(dst); filteredInvoker = FilterGeneric.makeArgumentFilter(1+i, reboxer, filteredInvoker); if (filteredInvoker == null) throw new InternalError(); } @@ -199,13 +203,13 @@ class ToGeneric { assert(!rret.isPrimitive()); if (rret == Object.class && !mustCast) return null; - return ValueConversions.cast(tret, false); + return ValueConversions.cast(tret); } else if (tret == rret) { - return ValueConversions.unbox(tret, false); + return ValueConversions.unbox(tret); } else { assert(rret.isPrimitive()); assert(tret == double.class ? rret == long.class : rret == int.class); - return ValueConversions.unboxRaw(tret, false); + return ValueConversions.unboxRaw(tret); } } @@ -311,7 +315,7 @@ class ToGeneric { } static Adapter buildAdapterFromBytecodes(MethodType entryPointType) { - throw new UnsupportedOperationException("NYI"); + throw new UnsupportedOperationException("NYI: "+entryPointType); } /** diff --git a/jdk/src/share/classes/java/lang/invoke/package-info.java b/jdk/src/share/classes/java/lang/invoke/package-info.java index b6a1992dca8..8ec18c461fe 100644 --- a/jdk/src/share/classes/java/lang/invoke/package-info.java +++ b/jdk/src/share/classes/java/lang/invoke/package-info.java @@ -185,7 +185,7 @@ * The method handle constant produced for such a method behaves as if * it were created by {@link java.lang.invoke.MethodHandle#asVarargsCollector asVarargsCollector}. * In other words, the constant method handle will exhibit variable arity, - * when invoked via {@code invokeGeneric}. + * when invoked via {@code MethodHandle.invoke}. * On the other hand, its behavior with respect to {@code invokeExact} will be the same * as if the {@code varargs} bit were not set. *
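A small sketch of the variable-arity behavior described above, using a looked-up handle for the varargs method Arrays.asList in place of a CONSTANT_MethodHandle (the choice of method is illustrative only):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.util.Arrays;
    import java.util.List;

    public class VarargsHandleSketch {
        public static void main(String[] args) throws Throwable {
            // Arrays.asList(Object...) has the varargs bit set, so the handle
            // behaves as if created by asVarargsCollector.
            MethodHandle asList = MethodHandles.lookup().findStatic(
                    Arrays.class, "asList",
                    MethodType.methodType(List.class, Object[].class));

            // Inexact invoke exhibits variable arity: trailing arguments are collected.
            List<?> collected = (List<?>) asList.invoke("a", "b", "c");
            System.out.println(collected);          // [a, b, c]

            // invokeExact ignores the varargs bit: the array must be passed as such.
            List<?> exact = (List<?>) asList.invokeExact(new Object[] { "a", "b", "c" });
            System.out.println(exact);              // [a, b, c]
        }
    }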

    @@ -243,7 +243,7 @@ *

  • optionally, one or more additional static arguments
  • * * The method handle is then applied to the other values as if by - * {@link java.lang.invoke.MethodHandle#invokeGeneric invokeGeneric}. + * {@link java.lang.invoke.MethodHandle#invoke MethodHandle.invoke}. * The returned result must be a {@link java.lang.invoke.CallSite CallSite} (or a subclass). * The type of the call site's target must be exactly equal to the type * derived from the dynamic call site's type descriptor and passed to @@ -251,7 +251,7 @@ * The call site then becomes permanently linked to the dynamic call site. *

    * As long as each bootstrap method can be correctly invoked - * by invokeGeneric, its detailed type is arbitrary. + * by MethodHandle.invoke, its detailed type is arbitrary. * For example, the first argument could be {@code Object} * instead of {@code MethodHandles.Lookup}, and the return type * could also be {@code Object} instead of {@code CallSite}. @@ -272,7 +272,7 @@ * (i.e., a {@code CONSTANT_Class}, {@code CONSTANT_MethodType}, * or {@code CONSTANT_MethodHandle} argument cannot be linked) *

  • the bootstrap method has the wrong arity, - * causing {@code invokeGeneric} to throw {@code WrongMethodTypeException}
  • + * causing {@code MethodHandle.invoke} to throw {@code WrongMethodTypeException} *
  • the bootstrap method has a wrong argument or return type
  • *
  • the bootstrap method invocation completes abnormally
  • *
  • the result from the bootstrap invocation is not a reference to @@ -381,10 +381,10 @@ * those values will be passed as additional arguments to the method handle. * (Note that because there is a limit of 255 arguments to any method, * at most 252 extra arguments can be supplied.) - * The bootstrap method will be invoked as if by either {@code invokeGeneric} + * The bootstrap method will be invoked as if by either {@code MethodHandle.invoke} * or {@code invokeWithArguments}. (There is no way to tell the difference.) *
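For concreteness, a hedged sketch of a bootstrap method of the shape described here, taking one extra static argument; all names are illustrative, and the linkage call is made by the JVM as if by MethodHandle.invoke:

    import java.lang.invoke.CallSite;
    import java.lang.invoke.ConstantCallSite;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class BootstrapSketch {
        // Invoked by the JVM to link a dynamic call site; "extra" stands for one
        // additional constant stacked by the CONSTANT_InvokeDynamic entry.
        public static CallSite bootstrap(MethodHandles.Lookup lookup,
                                         String name,
                                         MethodType type,
                                         String extra) throws Exception {
            return new ConstantCallSite(
                    lookup.findStatic(BootstrapSketch.class, name, type));
        }

        // A possible target: a call site named "greet" of type ()String links here.
        public static String greet() {
            return "hello";
        }
    }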

    - * The normal argument conversion rules for {@code invokeGeneric} apply to all stacked arguments. + * The normal argument conversion rules for {@code MethodHandle.invoke} apply to all stacked arguments. * For example, if a pushed value is a primitive type, it may be converted to a reference by boxing conversion. * If the bootstrap method is a variable arity method (its modifier bit {@code 0x0080} is set), * then some or all of the arguments specified here may be collected into a trailing array parameter. @@ -419,8 +419,8 @@ * For example, the fourth argument could be {@code MethodHandle}, * if that is the type of the corresponding constant in * the {@code CONSTANT_InvokeDynamic} entry. - * In that case, the {@code invokeGeneric} call will pass the extra method handle - * constant as an {@code Object}, but the type matching machinery of {@code invokeGeneric} + * In that case, the {@code MethodHandle.invoke} call will pass the extra method handle + * constant as an {@code Object}, but the type matching machinery of {@code MethodHandle.invoke} * will cast the reference back to {@code MethodHandle} before invoking the bootstrap method. * (If a string constant were passed instead, by badly generated code, that cast would then fail, * resulting in a {@code BootstrapMethodError}.) diff --git a/jdk/src/share/classes/java/net/SocketOption.java b/jdk/src/share/classes/java/net/SocketOption.java index 65dfe3c5a6b..d3c5972ee02 100644 --- a/jdk/src/share/classes/java/net/SocketOption.java +++ b/jdk/src/share/classes/java/net/SocketOption.java @@ -38,7 +38,7 @@ package java.net; * * @since 1.7 * - * @see StandardSocketOption + * @see StandardSocketOptions */ public interface SocketOption { diff --git a/jdk/src/share/classes/java/net/StandardSocketOption.java b/jdk/src/share/classes/java/net/StandardSocketOptions.java similarity index 99% rename from jdk/src/share/classes/java/net/StandardSocketOption.java rename to jdk/src/share/classes/java/net/StandardSocketOptions.java index bc9589689d3..b9bea8d0ab5 100644 --- a/jdk/src/share/classes/java/net/StandardSocketOption.java +++ b/jdk/src/share/classes/java/net/StandardSocketOptions.java @@ -38,8 +38,8 @@ package java.net; * @since 1.7 */ -public final class StandardSocketOption { - private StandardSocketOption() { } +public final class StandardSocketOptions { + private StandardSocketOptions() { } // -- SOL_SOCKET -- diff --git a/jdk/src/share/classes/java/nio/channels/AsynchronousServerSocketChannel.java b/jdk/src/share/classes/java/nio/channels/AsynchronousServerSocketChannel.java index c2b74d21c49..1b5a67f3c6d 100644 --- a/jdk/src/share/classes/java/nio/channels/AsynchronousServerSocketChannel.java +++ b/jdk/src/share/classes/java/nio/channels/AsynchronousServerSocketChannel.java @@ -58,11 +58,11 @@ import java.io.IOException; * Description * * - * {@link java.net.StandardSocketOption#SO_RCVBUF SO_RCVBUF} + * {@link java.net.StandardSocketOptions#SO_RCVBUF SO_RCVBUF} * The size of the socket receive buffer * * - * {@link java.net.StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} + * {@link java.net.StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} * Re-use address * * diff --git a/jdk/src/share/classes/java/nio/channels/AsynchronousSocketChannel.java b/jdk/src/share/classes/java/nio/channels/AsynchronousSocketChannel.java index 99692b38494..d78800b65d3 100644 --- a/jdk/src/share/classes/java/nio/channels/AsynchronousSocketChannel.java +++ b/jdk/src/share/classes/java/nio/channels/AsynchronousSocketChannel.java @@ -68,23 +68,23 @@ import 
java.nio.ByteBuffer; * Description * * - * {@link java.net.StandardSocketOption#SO_SNDBUF SO_SNDBUF} + * {@link java.net.StandardSocketOptions#SO_SNDBUF SO_SNDBUF} * The size of the socket send buffer * * - * {@link java.net.StandardSocketOption#SO_RCVBUF SO_RCVBUF} + * {@link java.net.StandardSocketOptions#SO_RCVBUF SO_RCVBUF} * The size of the socket receive buffer * * - * {@link java.net.StandardSocketOption#SO_KEEPALIVE SO_KEEPALIVE} + * {@link java.net.StandardSocketOptions#SO_KEEPALIVE SO_KEEPALIVE} * Keep connection alive * * - * {@link java.net.StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} + * {@link java.net.StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} * Re-use address * * - * {@link java.net.StandardSocketOption#TCP_NODELAY TCP_NODELAY} + * {@link java.net.StandardSocketOptions#TCP_NODELAY TCP_NODELAY} * Disable the Nagle algorithm * * diff --git a/jdk/src/share/classes/java/nio/channels/DatagramChannel.java b/jdk/src/share/classes/java/nio/channels/DatagramChannel.java index 0d99dc4e0d9..f4a6110b017 100644 --- a/jdk/src/share/classes/java/nio/channels/DatagramChannel.java +++ b/jdk/src/share/classes/java/nio/channels/DatagramChannel.java @@ -63,37 +63,37 @@ import java.nio.channels.spi.SelectorProvider; * Description * * - * {@link java.net.StandardSocketOption#SO_SNDBUF SO_SNDBUF} + * {@link java.net.StandardSocketOptions#SO_SNDBUF SO_SNDBUF} * The size of the socket send buffer * * - * {@link java.net.StandardSocketOption#SO_RCVBUF SO_RCVBUF} + * {@link java.net.StandardSocketOptions#SO_RCVBUF SO_RCVBUF} * The size of the socket receive buffer * * - * {@link java.net.StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} + * {@link java.net.StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} * Re-use address * * - * {@link java.net.StandardSocketOption#SO_BROADCAST SO_BROADCAST} + * {@link java.net.StandardSocketOptions#SO_BROADCAST SO_BROADCAST} * Allow transmission of broadcast datagrams * * - * {@link java.net.StandardSocketOption#IP_TOS IP_TOS} + * {@link java.net.StandardSocketOptions#IP_TOS IP_TOS} * The Type of Service (ToS) octet in the Internet Protocol (IP) header * * - * {@link java.net.StandardSocketOption#IP_MULTICAST_IF IP_MULTICAST_IF} + * {@link java.net.StandardSocketOptions#IP_MULTICAST_IF IP_MULTICAST_IF} * The network interface for Internet Protocol (IP) multicast datagrams * * - * {@link java.net.StandardSocketOption#IP_MULTICAST_TTL + * {@link java.net.StandardSocketOptions#IP_MULTICAST_TTL * IP_MULTICAST_TTL} * The time-to-live for Internet Protocol (IP) multicast * datagrams * * - * {@link java.net.StandardSocketOption#IP_MULTICAST_LOOP + * {@link java.net.StandardSocketOptions#IP_MULTICAST_LOOP * IP_MULTICAST_LOOP} * Loopback for Internet Protocol (IP) multicast datagrams * diff --git a/jdk/src/share/classes/java/nio/channels/MulticastChannel.java b/jdk/src/share/classes/java/nio/channels/MulticastChannel.java index d22f0d5a58e..5b61f77ff8a 100644 --- a/jdk/src/share/classes/java/nio/channels/MulticastChannel.java +++ b/jdk/src/share/classes/java/nio/channels/MulticastChannel.java @@ -30,7 +30,7 @@ import java.net.NetworkInterface; import java.io.IOException; import java.net.ProtocolFamily; // javadoc import java.net.StandardProtocolFamily; // javadoc -import java.net.StandardSocketOption; // javadoc +import java.net.StandardSocketOptions; // javadoc /** * A network channel that supports Internet Protocol (IP) multicasting. 
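A minimal sketch of the pluralized class name in use with a channel (the choice of ServerSocketChannel and of the options queried is arbitrary):

    import java.net.InetSocketAddress;
    import java.net.StandardSocketOptions;
    import java.nio.channels.ServerSocketChannel;

    public class SocketOptionsSketch {
        public static void main(String[] args) throws Exception {
            try (ServerSocketChannel ssc = ServerSocketChannel.open()) {
                // The option constants now live on StandardSocketOptions.
                ssc.setOption(StandardSocketOptions.SO_REUSEADDR, true)
                   .bind(new InetSocketAddress(0));
                System.out.println("SO_RCVBUF = "
                        + ssc.getOption(StandardSocketOptions.SO_RCVBUF));
            }
        }
    }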
@@ -93,7 +93,7 @@ import java.net.StandardSocketOption; // javadoc * a specific address, rather than the wildcard address then it is implementation * specific if multicast datagrams are received by the socket.

  • * - *
  • The {@link StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} option should be + *

  • The {@link StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} option should be * enabled prior to {@link NetworkChannel#bind binding} the socket. This is * required to allow multiple members of the group to bind to the same * address.

  • @@ -107,9 +107,9 @@ import java.net.StandardSocketOption; // javadoc * NetworkInterface ni = NetworkInterface.getByName("hme0"); * * DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET) - * .setOption(StandardSocketOption.SO_REUSEADDR, true) + * .setOption(StandardSocketOptions.SO_REUSEADDR, true) * .bind(new InetSocketAddress(5000)) - * .setOption(StandardSocketOption.IP_MULTICAST_IF, ni); + * .setOption(StandardSocketOptions.IP_MULTICAST_IF, ni); * * InetAddress group = InetAddress.getByName("225.4.5.6"); * diff --git a/jdk/src/share/classes/java/nio/channels/NetworkChannel.java b/jdk/src/share/classes/java/nio/channels/NetworkChannel.java index f8a169d8c0a..4d4abfdb75c 100644 --- a/jdk/src/share/classes/java/nio/channels/NetworkChannel.java +++ b/jdk/src/share/classes/java/nio/channels/NetworkChannel.java @@ -124,7 +124,7 @@ public interface NetworkChannel * @throws IOException * If an I/O error occurs * - * @see java.net.StandardSocketOption + * @see java.net.StandardSocketOptions */ NetworkChannel setOption(SocketOption name, T value) throws IOException; @@ -144,7 +144,7 @@ public interface NetworkChannel * @throws IOException * If an I/O error occurs * - * @see java.net.StandardSocketOption + * @see java.net.StandardSocketOptions */ T getOption(SocketOption name) throws IOException; diff --git a/jdk/src/share/classes/java/nio/channels/ServerSocketChannel.java b/jdk/src/share/classes/java/nio/channels/ServerSocketChannel.java index 2711594a57e..127fb5bcd40 100644 --- a/jdk/src/share/classes/java/nio/channels/ServerSocketChannel.java +++ b/jdk/src/share/classes/java/nio/channels/ServerSocketChannel.java @@ -52,11 +52,11 @@ import java.nio.channels.spi.SelectorProvider; * Description * * - * {@link java.net.StandardSocketOption#SO_RCVBUF SO_RCVBUF} + * {@link java.net.StandardSocketOptions#SO_RCVBUF SO_RCVBUF} * The size of the socket receive buffer * * - * {@link java.net.StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} + * {@link java.net.StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} * Re-use address * * diff --git a/jdk/src/share/classes/java/nio/channels/SocketChannel.java b/jdk/src/share/classes/java/nio/channels/SocketChannel.java index 03ac42a3e94..d5f43ba7750 100644 --- a/jdk/src/share/classes/java/nio/channels/SocketChannel.java +++ b/jdk/src/share/classes/java/nio/channels/SocketChannel.java @@ -72,28 +72,28 @@ import java.nio.channels.spi.SelectorProvider; * Description * * - * {@link java.net.StandardSocketOption#SO_SNDBUF SO_SNDBUF} + * {@link java.net.StandardSocketOptions#SO_SNDBUF SO_SNDBUF} * The size of the socket send buffer * * - * {@link java.net.StandardSocketOption#SO_RCVBUF SO_RCVBUF} + * {@link java.net.StandardSocketOptions#SO_RCVBUF SO_RCVBUF} * The size of the socket receive buffer * * - * {@link java.net.StandardSocketOption#SO_KEEPALIVE SO_KEEPALIVE} + * {@link java.net.StandardSocketOptions#SO_KEEPALIVE SO_KEEPALIVE} * Keep connection alive * * - * {@link java.net.StandardSocketOption#SO_REUSEADDR SO_REUSEADDR} + * {@link java.net.StandardSocketOptions#SO_REUSEADDR SO_REUSEADDR} * Re-use address * * - * {@link java.net.StandardSocketOption#SO_LINGER SO_LINGER} + * {@link java.net.StandardSocketOptions#SO_LINGER SO_LINGER} * Linger on close if data is present (when configured in blocking mode * only) * * - * {@link java.net.StandardSocketOption#TCP_NODELAY TCP_NODELAY} + * {@link java.net.StandardSocketOptions#TCP_NODELAY TCP_NODELAY} * Disable the Nagle algorithm * * diff --git a/jdk/src/share/classes/java/nio/charset/Charset.java 
b/jdk/src/share/classes/java/nio/charset/Charset.java index 51dbe1504dc..1a30ba96103 100644 --- a/jdk/src/share/classes/java/nio/charset/Charset.java +++ b/jdk/src/share/classes/java/nio/charset/Charset.java @@ -215,7 +215,7 @@ import sun.security.action.GetPropertyAction; * determined during virtual-machine startup and typically depends upon the * locale and charset being used by the underlying operating system.

    * - *

    The {@link StandardCharset} class defines constants for each of the + *

    The {@link StandardCharsets} class defines constants for each of the * standard charsets. * *

    Terminology

    diff --git a/jdk/src/share/classes/java/nio/charset/StandardCharset.java b/jdk/src/share/classes/java/nio/charset/StandardCharsets.java similarity index 96% rename from jdk/src/share/classes/java/nio/charset/StandardCharset.java rename to jdk/src/share/classes/java/nio/charset/StandardCharsets.java index edec058bc45..b2572c361cf 100644 --- a/jdk/src/share/classes/java/nio/charset/StandardCharset.java +++ b/jdk/src/share/classes/java/nio/charset/StandardCharsets.java @@ -32,10 +32,10 @@ package java.nio.charset; * @see Standard Charsets * @since 1.7 */ -public final class StandardCharset { +public final class StandardCharsets { - private StandardCharset() { - throw new AssertionError("No java.nio.charset.StandardCharset instances for you!"); + private StandardCharsets() { + throw new AssertionError("No java.nio.charset.StandardCharsets instances for you!"); } /** * Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the diff --git a/jdk/src/share/classes/java/nio/file/Path.java b/jdk/src/share/classes/java/nio/file/Path.java index 2d2e977d49d..370c38dbac2 100644 --- a/jdk/src/share/classes/java/nio/file/Path.java +++ b/jdk/src/share/classes/java/nio/file/Path.java @@ -72,7 +72,7 @@ import java.util.Iterator; * directory and is UTF-8 encoded. *
      *     Path path = FileSystems.getDefault().getPath("logs", "access.log");
- *     BufferedReader reader = Files.newBufferedReader(path, StandardCharset.UTF_8);
+ *     BufferedReader reader = Files.newBufferedReader(path, StandardCharsets.UTF_8);
      * 
    * *
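The same example, written out as a runnable sketch against the renamed StandardCharsets class (the logs/access.log path is assumed to exist and is purely illustrative):

    import java.io.BufferedReader;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.FileSystems;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class ReadLogSketch {
        public static void main(String[] args) throws Exception {
            Path path = FileSystems.getDefault().getPath("logs", "access.log");
            try (BufferedReader reader = Files.newBufferedReader(path, StandardCharsets.UTF_8)) {
                System.out.println(reader.readLine());
            }
        }
    }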

    Interoperability

    @@ -609,11 +609,11 @@ public interface Path * directory can be watched. The {@code events} parameter is the events to * register and may contain the following events: *
      - *
    • {@link StandardWatchEventKind#ENTRY_CREATE ENTRY_CREATE} - + *
    • {@link StandardWatchEventKinds#ENTRY_CREATE ENTRY_CREATE} - * entry created or moved into the directory
    • - *
    • {@link StandardWatchEventKind#ENTRY_DELETE ENTRY_DELETE} - + *
    • {@link StandardWatchEventKinds#ENTRY_DELETE ENTRY_DELETE} - * entry deleted or moved out of the directory
    • - *
    • {@link StandardWatchEventKind#ENTRY_MODIFY ENTRY_MODIFY} - + *
    • {@link StandardWatchEventKinds#ENTRY_MODIFY ENTRY_MODIFY} - * entry in directory was modified
    • *
    * @@ -622,7 +622,7 @@ public interface Path * that locates the directory entry that is created, deleted, or modified. * *

    The set of events may include additional implementation specific - * event that are not defined by the enum {@link StandardWatchEventKind} + * event that are not defined by the enum {@link StandardWatchEventKinds} * *

    The {@code modifiers} parameter specifies modifiers that * qualify how the directory is registered. This release does not define any diff --git a/jdk/src/share/classes/java/nio/file/StandardWatchEventKind.java b/jdk/src/share/classes/java/nio/file/StandardWatchEventKinds.java similarity index 94% rename from jdk/src/share/classes/java/nio/file/StandardWatchEventKind.java rename to jdk/src/share/classes/java/nio/file/StandardWatchEventKinds.java index 6064d75a7fa..ef111e2bf7d 100644 --- a/jdk/src/share/classes/java/nio/file/StandardWatchEventKind.java +++ b/jdk/src/share/classes/java/nio/file/StandardWatchEventKinds.java @@ -31,8 +31,8 @@ package java.nio.file; * @since 1.7 */ -public final class StandardWatchEventKind { - private StandardWatchEventKind() { } +public final class StandardWatchEventKinds { + private StandardWatchEventKinds() { } /** * A special event to indicate that events may have been lost or @@ -44,8 +44,8 @@ public final class StandardWatchEventKind { * * @see WatchService */ - public static final WatchEvent.Kind OVERFLOW = - new StdWatchEventKind("OVERFLOW", Void.class); + public static final WatchEvent.Kind OVERFLOW = + new StdWatchEventKind("OVERFLOW", Object.class); /** * Directory entry created. diff --git a/jdk/src/share/classes/java/nio/file/WatchEvent.java b/jdk/src/share/classes/java/nio/file/WatchEvent.java index 4328828fe22..cab199f0b57 100644 --- a/jdk/src/share/classes/java/nio/file/WatchEvent.java +++ b/jdk/src/share/classes/java/nio/file/WatchEvent.java @@ -50,7 +50,7 @@ public interface WatchEvent { * An event kind, for the purposes of identification. * * @since 1.7 - * @see StandardWatchEventKind + * @see StandardWatchEventKinds */ public static interface Kind { /** @@ -98,9 +98,9 @@ public interface WatchEvent { /** * Returns the context for the event. * - *

    In the case of {@link StandardWatchEventKind#ENTRY_CREATE ENTRY_CREATE}, - * {@link StandardWatchEventKind#ENTRY_DELETE ENTRY_DELETE}, and {@link - * StandardWatchEventKind#ENTRY_MODIFY ENTRY_MODIFY} events the context is + *

    In the case of {@link StandardWatchEventKinds#ENTRY_CREATE ENTRY_CREATE}, + * {@link StandardWatchEventKinds#ENTRY_DELETE ENTRY_DELETE}, and {@link + * StandardWatchEventKinds#ENTRY_MODIFY ENTRY_MODIFY} events the context is * a {@code Path} that is the {@link Path#relativize relative} path between * the directory registered with the watch service, and the entry that is * created, deleted, or modified. diff --git a/jdk/src/share/classes/java/nio/file/WatchService.java b/jdk/src/share/classes/java/nio/file/WatchService.java index 58e633610c0..5a63fcd8722 100644 --- a/jdk/src/share/classes/java/nio/file/WatchService.java +++ b/jdk/src/share/classes/java/nio/file/WatchService.java @@ -68,7 +68,7 @@ import java.util.concurrent.TimeUnit; * of events that it may accumulate. Where an implementation knowingly * discards events then it arranges for the key's {@link WatchKey#pollEvents * pollEvents} method to return an element with an event type of {@link - * StandardWatchEventKind#OVERFLOW OVERFLOW}. This event can be used by the + * StandardWatchEventKinds#OVERFLOW OVERFLOW}. This event can be used by the * consumer as a trigger to re-examine the state of the object. * *
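A short runnable sketch of the pluralized event-kind class with a watch service (the watched directory and the single poll are illustrative only):

    import java.nio.file.FileSystems;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardWatchEventKinds;
    import java.nio.file.WatchEvent;
    import java.nio.file.WatchKey;
    import java.nio.file.WatchService;

    public class WatchSketch {
        public static void main(String[] args) throws Exception {
            Path dir = Paths.get(".");
            try (WatchService watcher = FileSystems.getDefault().newWatchService()) {
                // Event kinds now come from StandardWatchEventKinds; OVERFLOW is
                // implied and need not be listed.
                dir.register(watcher,
                             StandardWatchEventKinds.ENTRY_CREATE,
                             StandardWatchEventKinds.ENTRY_DELETE,
                             StandardWatchEventKinds.ENTRY_MODIFY);
                WatchKey key = watcher.take();
                for (WatchEvent<?> event : key.pollEvents()) {
                    System.out.println(event.kind() + ": " + event.context());
                }
                key.reset();
            }
        }
    }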

    When an event is reported to indicate that a file in a watched directory @@ -87,7 +87,7 @@ import java.util.concurrent.TimeUnit; * are detected, their timeliness, and whether their ordering is preserved are * highly implementation specific. For example, when a file in a watched * directory is modified then it may result in a single {@link - * StandardWatchEventKind#ENTRY_MODIFY ENTRY_MODIFY} event in some + * StandardWatchEventKinds#ENTRY_MODIFY ENTRY_MODIFY} event in some * implementations but several events in other implementations. Short-lived * files (meaning files that are deleted very quickly after they are created) * may not be detected by primitive implementations that periodically poll the diff --git a/jdk/src/share/classes/java/nio/file/Watchable.java b/jdk/src/share/classes/java/nio/file/Watchable.java index 41ef35200e7..f84437c5c69 100644 --- a/jdk/src/share/classes/java/nio/file/Watchable.java +++ b/jdk/src/share/classes/java/nio/file/Watchable.java @@ -53,7 +53,7 @@ public interface Watchable { * those specified by the {@code events} and {@code modifiers} parameters. * Changing the event set does not cause pending events for the object to be * discarded. Objects are automatically registered for the {@link - * StandardWatchEventKind#OVERFLOW OVERFLOW} event. This event is not + * StandardWatchEventKinds#OVERFLOW OVERFLOW} event. This event is not * required to be present in the array of events. * *

    Otherwise the file system object has not yet been registered with the diff --git a/jdk/src/share/classes/java/sql/BatchUpdateException.java b/jdk/src/share/classes/java/sql/BatchUpdateException.java index 4fa6241169f..8405da2dac7 100644 --- a/jdk/src/share/classes/java/sql/BatchUpdateException.java +++ b/jdk/src/share/classes/java/sql/BatchUpdateException.java @@ -89,7 +89,7 @@ public class BatchUpdateException extends SQLException { * The cause is not initialized, and may subsequently be * initialized by a call to the * {@link Throwable#initCause(java.lang.Throwable)} method. The vendor code - * is intialized to 0. + * is initialized to 0. *

    * * @param reason a description of the exception @@ -188,7 +188,7 @@ public class BatchUpdateException extends SQLException { * @since 1.6 */ public BatchUpdateException(Throwable cause) { - this(null, null, 0, null, cause); + this((cause == null ? null : cause.toString()), null, 0, null, cause); } /** @@ -214,7 +214,7 @@ public class BatchUpdateException extends SQLException { * @since 1.6 */ public BatchUpdateException(int []updateCounts , Throwable cause) { - this(null, null, 0, updateCounts, cause); + this((cause == null ? null : cause.toString()), null, 0, updateCounts, cause); } /** diff --git a/jdk/src/share/classes/java/util/Formatter.java b/jdk/src/share/classes/java/util/Formatter.java index 5ca126ab31d..bcd36ea6120 100644 --- a/jdk/src/share/classes/java/util/Formatter.java +++ b/jdk/src/share/classes/java/util/Formatter.java @@ -826,7 +826,7 @@ import sun.misc.FormattedFloatingDecimal; * *

  • Float and Double * - *
  • BigDecimal + *
  • BigDecimal * * * @@ -1362,7 +1362,7 @@ import sun.misc.FormattedFloatingDecimal; * precision is not provided, then all of the digits as returned by {@link * Double#toHexString(double)} will be output. * - *

    BigDecimal + *

    BigDecimal * *

The following conversions may be applied {@link java.math.BigDecimal * BigDecimal}. @@ -1372,7 +1372,7 @@ import sun.misc.FormattedFloatingDecimal; * {@code 'e'} * '\u0065' * Requires the output to be formatted using computerized scientific notation. The localization algorithm is applied. * *

    The formatting of the magnitude m depends upon its value. @@ -1427,11 +1427,11 @@ import sun.misc.FormattedFloatingDecimal; * *

If m is greater than or equal to 10^-4 but less * than 10^precision then it is represented in decimal format. + * href="#bdecimal">decimal format. * *

If m is less than 10^-4 or greater than or equal to * 10^precision, then it is represented in computerized scientific notation. + * href="#bscientific">computerized scientific notation. * *

    The total number of significant digits in m is equal to the * precision. If the precision is not specified, then the default value is @@ -1447,7 +1447,7 @@ import sun.misc.FormattedFloatingDecimal; * * {@code 'f'} * '\u0066' - * Requires the output to be formatted using decimal + * Requires the output to be formatted using decimal * format. The localization algorithm is * applied. * diff --git a/jdk/src/share/classes/java/util/concurrent/Phaser.java b/jdk/src/share/classes/java/util/concurrent/Phaser.java index c4ebe2378cb..bdafbbb7a9d 100644 --- a/jdk/src/share/classes/java/util/concurrent/Phaser.java +++ b/jdk/src/share/classes/java/util/concurrent/Phaser.java @@ -159,7 +159,7 @@ import java.util.concurrent.locks.LockSupport; * void runTasks(List tasks) { * final Phaser phaser = new Phaser(1); // "1" to register self * // create and start threads - * for (Runnable task : tasks) { + * for (final Runnable task : tasks) { * phaser.register(); * new Thread() { * public void run() { diff --git a/jdk/src/share/classes/java/util/concurrent/locks/LockSupport.java b/jdk/src/share/classes/java/util/concurrent/locks/LockSupport.java index f0cd3bc0456..7b829f63a06 100644 --- a/jdk/src/share/classes/java/util/concurrent/locks/LockSupport.java +++ b/jdk/src/share/classes/java/util/concurrent/locks/LockSupport.java @@ -275,10 +275,14 @@ public class LockSupport { * snapshot -- the thread may have since unblocked or blocked on a * different blocker object. * + * @param t the thread * @return the blocker + * @throws NullPointerException if argument is null * @since 1.6 */ public static Object getBlocker(Thread t) { + if (t == null) + throw new NullPointerException(); return unsafe.getObjectVolatile(t, parkBlockerOffset); } diff --git a/jdk/src/share/classes/java/util/logging/LogManager.java b/jdk/src/share/classes/java/util/logging/LogManager.java index cf49a0a69b7..a28b0d8a824 100644 --- a/jdk/src/share/classes/java/util/logging/LogManager.java +++ b/jdk/src/share/classes/java/util/logging/LogManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -342,12 +342,35 @@ public class LogManager { // already been created with the given name it is returned. // Otherwise a new logger instance is created and registered // in the LogManager global namespace. + + // This method will always return a non-null Logger object. + // Synchronization is not required here. All synchronization for + // adding a new Logger object is handled by addLogger(). Logger demandLogger(String name) { Logger result = getLogger(name); if (result == null) { - result = new Logger(name, null); - addLogger(result); - result = getLogger(name); + // only allocate the new logger once + Logger newLogger = new Logger(name, null); + do { + if (addLogger(newLogger)) { + // We successfully added the new Logger that we + // created above so return it without refetching. + return newLogger; + } + + // We didn't add the new Logger that we created above + // because another thread added a Logger with the same + // name after our null check above and before our call + // to addLogger(). We have to refetch the Logger because + // addLogger() returns a boolean instead of the Logger + // reference itself. 
However, if the thread that created + // the other Logger is not holding a strong reference to + // the other Logger, then it is possible for the other + // Logger to be GC'ed after we saw it in addLogger() and + // before we can refetch it. If it has been GC'ed then + // we'll just loop around and try again. + result = getLogger(name); + } while (result == null); } return result; } diff --git a/jdk/src/share/classes/java/util/logging/Logger.java b/jdk/src/share/classes/java/util/logging/Logger.java index 87f6867169f..e3ce6bdac60 100644 --- a/jdk/src/share/classes/java/util/logging/Logger.java +++ b/jdk/src/share/classes/java/util/logging/Logger.java @@ -310,7 +310,20 @@ public class Logger { * @return a suitable Logger * @throws NullPointerException if the name is null. */ - public static synchronized Logger getLogger(String name) { + + // Synchronization is not required here. All synchronization for + // adding a new Logger object is handled by LogManager.addLogger(). + public static Logger getLogger(String name) { + // This method is intentionally not a wrapper around a call + // to getLogger(name, resourceBundleName). If it were then + // this sequence: + // + // getLogger("Foo", "resourceBundleForFoo"); + // getLogger("Foo"); + // + // would throw an IllegalArgumentException in the second call + // because the wrapper would result in an attempt to replace + // the existing "resourceBundleForFoo" with null. LogManager manager = LogManager.getLogManager(); return manager.demandLogger(name); } @@ -355,7 +368,10 @@ public class Logger { * a different resource bundle name. * @throws NullPointerException if the name is null. */ - public static synchronized Logger getLogger(String name, String resourceBundleName) { + + // Synchronization is not required here. All synchronization for + // adding a new Logger object is handled by LogManager.addLogger(). + public static Logger getLogger(String name, String resourceBundleName) { LogManager manager = LogManager.getLogManager(); Logger result = manager.demandLogger(name); if (result.resourceBundleName == null) { @@ -417,7 +433,10 @@ public class Logger { * @throws MissingResourceException if the resourceBundleName is non-null and * no corresponding resource can be found. */ - public static synchronized Logger getAnonymousLogger(String resourceBundleName) { + + // Synchronization is not required here. All synchronization for + // adding a new anonymous Logger object is handled by doSetParent(). 
+ public static Logger getAnonymousLogger(String resourceBundleName) { LogManager manager = LogManager.getLogManager(); // cleanup some Loggers that have been GC'ed manager.drainLoggerRefQueueBounded(); diff --git a/jdk/src/share/classes/java/util/regex/Pattern.java b/jdk/src/share/classes/java/util/regex/Pattern.java index fa302cfbab7..d89eca9845d 100644 --- a/jdk/src/share/classes/java/util/regex/Pattern.java +++ b/jdk/src/share/classes/java/util/regex/Pattern.java @@ -213,7 +213,7 @@ import java.util.Arrays; * A character in the Greek block (block) * \p{Lu} * An uppercase letter (category) - * \p{isAlphabetic} + * \p{IsAlphabetic} * An alphabetic character (binary property) * \p{Sc} * A currency symbol diff --git a/jdk/src/share/classes/java/util/zip/ZipCoder.java b/jdk/src/share/classes/java/util/zip/ZipCoder.java index 62844916f3d..12404cb38ac 100644 --- a/jdk/src/share/classes/java/util/zip/ZipCoder.java +++ b/jdk/src/share/classes/java/util/zip/ZipCoder.java @@ -28,7 +28,7 @@ package java.util.zip; import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.charset.Charset; -import java.nio.charset.StandardCharset; +import java.nio.charset.StandardCharsets; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.nio.charset.CoderResult; @@ -107,7 +107,7 @@ final class ZipCoder { if (isUTF8) return getBytes(s); if (utf8 == null) - utf8 = new ZipCoder(StandardCharset.UTF_8); + utf8 = new ZipCoder(StandardCharsets.UTF_8); return utf8.getBytes(s); } @@ -116,7 +116,7 @@ final class ZipCoder { if (isUTF8) return toString(ba, len); if (utf8 == null) - utf8 = new ZipCoder(StandardCharset.UTF_8); + utf8 = new ZipCoder(StandardCharsets.UTF_8); return utf8.toString(ba, len); } @@ -132,7 +132,7 @@ final class ZipCoder { private ZipCoder(Charset cs) { this.cs = cs; - this.isUTF8 = cs.name().equals(StandardCharset.UTF_8.name()); + this.isUTF8 = cs.name().equals(StandardCharsets.UTF_8.name()); } static ZipCoder get(Charset charset) { diff --git a/jdk/src/share/classes/java/util/zip/ZipFile.java b/jdk/src/share/classes/java/util/zip/ZipFile.java index d1861ad732c..9693809331a 100644 --- a/jdk/src/share/classes/java/util/zip/ZipFile.java +++ b/jdk/src/share/classes/java/util/zip/ZipFile.java @@ -31,7 +31,7 @@ import java.io.IOException; import java.io.EOFException; import java.io.File; import java.nio.charset.Charset; -import java.nio.charset.StandardCharset; +import java.nio.charset.StandardCharsets; import java.util.ArrayDeque; import java.util.Deque; import java.util.Enumeration; @@ -141,7 +141,7 @@ class ZipFile implements ZipConstants, Closeable { * @since 1.3 */ public ZipFile(File file, int mode) throws IOException { - this(file, mode, StandardCharset.UTF_8); + this(file, mode, StandardCharsets.UTF_8); } /** diff --git a/jdk/src/share/classes/java/util/zip/ZipInputStream.java b/jdk/src/share/classes/java/util/zip/ZipInputStream.java index ebfcce146cc..a60adb8040e 100644 --- a/jdk/src/share/classes/java/util/zip/ZipInputStream.java +++ b/jdk/src/share/classes/java/util/zip/ZipInputStream.java @@ -30,7 +30,7 @@ import java.io.IOException; import java.io.EOFException; import java.io.PushbackInputStream; import java.nio.charset.Charset; -import java.nio.charset.StandardCharset; +import java.nio.charset.StandardCharsets; import static java.util.zip.ZipConstants64.*; /** @@ -76,7 +76,7 @@ class ZipInputStream extends InflaterInputStream implements ZipConstants { * @param in the actual input stream */ public ZipInputStream(InputStream in) { - this(in, 
diff --git a/jdk/src/share/classes/java/util/zip/ZipCoder.java b/jdk/src/share/classes/java/util/zip/ZipCoder.java
index 62844916f3d..12404cb38ac 100644
--- a/jdk/src/share/classes/java/util/zip/ZipCoder.java
+++ b/jdk/src/share/classes/java/util/zip/ZipCoder.java
@@ -28,7 +28,7 @@ package java.util.zip;
 import java.nio.ByteBuffer;
 import java.nio.CharBuffer;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharset;
+import java.nio.charset.StandardCharsets;
 import java.nio.charset.CharsetDecoder;
 import java.nio.charset.CharsetEncoder;
 import java.nio.charset.CoderResult;
@@ -107,7 +107,7 @@ final class ZipCoder {
         if (isUTF8)
             return getBytes(s);
         if (utf8 == null)
-            utf8 = new ZipCoder(StandardCharset.UTF_8);
+            utf8 = new ZipCoder(StandardCharsets.UTF_8);
         return utf8.getBytes(s);
     }
@@ -116,7 +116,7 @@ final class ZipCoder {
         if (isUTF8)
             return toString(ba, len);
         if (utf8 == null)
-            utf8 = new ZipCoder(StandardCharset.UTF_8);
+            utf8 = new ZipCoder(StandardCharsets.UTF_8);
         return utf8.toString(ba, len);
     }
@@ -132,7 +132,7 @@ final class ZipCoder {
     private ZipCoder(Charset cs) {
         this.cs = cs;
-        this.isUTF8 = cs.name().equals(StandardCharset.UTF_8.name());
+        this.isUTF8 = cs.name().equals(StandardCharsets.UTF_8.name());
     }
     static ZipCoder get(Charset charset) {
diff --git a/jdk/src/share/classes/java/util/zip/ZipFile.java b/jdk/src/share/classes/java/util/zip/ZipFile.java
index d1861ad732c..9693809331a 100644
--- a/jdk/src/share/classes/java/util/zip/ZipFile.java
+++ b/jdk/src/share/classes/java/util/zip/ZipFile.java
@@ -31,7 +31,7 @@ import java.io.IOException;
 import java.io.EOFException;
 import java.io.File;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharset;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayDeque;
 import java.util.Deque;
 import java.util.Enumeration;
@@ -141,7 +141,7 @@ class ZipFile implements ZipConstants, Closeable {
      * @since 1.3
      */
     public ZipFile(File file, int mode) throws IOException {
-        this(file, mode, StandardCharset.UTF_8);
+        this(file, mode, StandardCharsets.UTF_8);
     }
     /**
diff --git a/jdk/src/share/classes/java/util/zip/ZipInputStream.java b/jdk/src/share/classes/java/util/zip/ZipInputStream.java
index ebfcce146cc..a60adb8040e 100644
--- a/jdk/src/share/classes/java/util/zip/ZipInputStream.java
+++ b/jdk/src/share/classes/java/util/zip/ZipInputStream.java
@@ -30,7 +30,7 @@ import java.io.IOException;
 import java.io.EOFException;
 import java.io.PushbackInputStream;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharset;
+import java.nio.charset.StandardCharsets;
 import static java.util.zip.ZipConstants64.*;
 /**
@@ -76,7 +76,7 @@ class ZipInputStream extends InflaterInputStream implements ZipConstants {
      * @param in the actual input stream
      */
     public ZipInputStream(InputStream in) {
-        this(in, StandardCharset.UTF_8);
+        this(in, StandardCharsets.UTF_8);
     }
     /**
diff --git a/jdk/src/share/classes/java/util/zip/ZipOutputStream.java b/jdk/src/share/classes/java/util/zip/ZipOutputStream.java
index c0b59488002..91f7f75ece2 100644
--- a/jdk/src/share/classes/java/util/zip/ZipOutputStream.java
+++ b/jdk/src/share/classes/java/util/zip/ZipOutputStream.java
@@ -28,7 +28,7 @@ package java.util.zip;
 import java.io.OutputStream;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharset;
+import java.nio.charset.StandardCharsets;
 import java.util.Vector;
 import java.util.HashSet;
 import static java.util.zip.ZipConstants64.*;
@@ -101,7 +101,7 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants {
      * @param out the actual output stream
      */
     public ZipOutputStream(OutputStream out) {
-        this(out, StandardCharset.UTF_8);
+        this(out, StandardCharsets.UTF_8);
     }
     /**
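The java.util.zip hunks above are a mechanical rename from the interim StandardCharset holder class to the final java.nio.charset.StandardCharsets name; the convenience constructors keep delegating to the UTF-8 constant. A small sketch of client code against the renamed class (demo.zip and the entry name are placeholders, not part of this changeset):

    import java.io.File;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;
    import java.util.zip.ZipOutputStream;

    public class ZipCharsetDemo {
        public static void main(String[] args) throws IOException {
            // Write an archive; the (OutputStream, Charset) constructor mirrors the
            // UTF-8 default that the patched convenience constructors delegate to.
            try (OutputStream os = Files.newOutputStream(Paths.get("demo.zip"));
                 ZipOutputStream zos = new ZipOutputStream(os, StandardCharsets.UTF_8)) {
                zos.putNextEntry(new ZipEntry("grüße.txt"));   // non-ASCII entry name
                zos.write("hello".getBytes(StandardCharsets.UTF_8));
                zos.closeEntry();
            }

            // Read it back, decoding entry names with the same charset.
            try (ZipFile zf = new ZipFile(new File("demo.zip"), ZipFile.OPEN_READ,
                                          StandardCharsets.UTF_8)) {
                System.out.println(zf.entries().nextElement().getName());
            }
        }
    }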
diff --git a/jdk/src/share/classes/javax/imageio/metadata/doc-files/jpeg_metadata.html b/jdk/src/share/classes/javax/imageio/metadata/doc-files/jpeg_metadata.html
index 721bdb2e702..5d3b0ea5e7d 100644
--- a/jdk/src/share/classes/javax/imageio/metadata/doc-files/jpeg_metadata.html
+++ b/jdk/src/share/classes/javax/imageio/metadata/doc-files/jpeg_metadata.html
@@ -211,6 +211,20 @@ to any listeners.
+Optional ColorSpace support:
+Handling of PhotoYCC (YCC), PhotoYCCA (YCCA), RGBA and YCbCrA color spaces
+by the standard plugin, as described below, is dependent on capabilities
+of the libraries used to interpret the JPEG data. Thus all consequential
+behaviors are optional. If the support is not available when decoding,
+the color space will be treated as unrecognized and the appropriate
+default color space for the specified number of component channels
+may be used.
+When writing, an Exception may be thrown if no suitable conversion
+can be applied before encoding.
+But where the support for these color spaces is available, the behavior
+must be as documented.
+

+
 When reading, the contents of the stream are interpreted by the usual JPEG conventions, as follows:
@@ -241,8 +255,11 @@ JPEG conventions, as follows:
 2-channel images are assumed to be grayscale with an alpha channel. For 3- and 4-channel images, the component ids are consulted. If these values are 1-3 for a 3-channel image, then the image is assumed to be
-YCbCr. If these values are 1-4 for a 4-channel image, then the image
-is assumed to be YCbCrA. If these values are > 4, they are checked
+YCbCr. Subject to the availability of the
+optional color space support
+described above, if these values are 1-4 for a 4-channel image,
+then the image is assumed to be YCbCrA.
+If these values are > 4, they are checked
 against the ASCII codes for 'R', 'G', 'B', 'A', 'C', 'c'. These can encode the following colorspaces:

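The added wording above makes the 4-channel interpretations explicitly best-effort. A hedged sketch of how a caller can check which interpretation the standard plug-in actually applied when decoding (sample.jpg is a placeholder; the reported values depend on the JPEG library behind the plug-in):

    import java.awt.image.BufferedImage;
    import java.io.File;
    import java.io.IOException;
    import javax.imageio.ImageIO;

    public class JpegColorSpaceProbe {
        public static void main(String[] args) throws IOException {
            // Decode with the standard plug-in; for 4-channel streams the color space
            // interpretation (e.g. YCbCrA) is optional, so inspect what was produced.
            BufferedImage img = ImageIO.read(new File("sample.jpg"));
            if (img == null) {
                System.out.println("No registered reader claimed the stream");
                return;
            }
            System.out.println("bands      = " + img.getRaster().getNumBands());
            System.out.println("has alpha  = " + img.getColorModel().hasAlpha());
            System.out.println("colorspace = " + img.getColorModel().getColorSpace().getType());
        }
    }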
@@ -346,12 +363,16 @@ If no metadata object is specified, then the following defaults apply:
 component ids in the frame and scan headers are set to 1, 2, and 3.

-  • RGBA images are converted to YCbCrA, subsampled in the
+  • Subject to the optional library support
+    described above,
+    RGBA images are converted to YCbCrA, subsampled in the
 chrominance channels by half both vertically and horizontally, and written without any special marker segments. The component ids in the frame and scan headers are set to 1, 2, 3, and 4.
-  • PhotoYCC and YCCA images are subsampled by half in the chrominance
+  • Subject to the optional library support
+    described above,
+    PhotoYCC and YCCA images are subsampled by half in the chrominance
 channels both vertically and horizontally and written with an Adobe APP14 marker segment and 'Y','C', and 'c' (and 'A' if an alpha channel is present) as component ids in the frame
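Because the RGBA and PhotoYCC writing paths above are now documented as optional, an encoder without that support may reject an image with an alpha channel. A defensive sketch under that assumption (alpha-demo.jpg is a placeholder; whether the write succeeds, returns false, or throws depends on the underlying library):

    import java.awt.image.BufferedImage;
    import java.io.File;
    import java.io.IOException;
    import javax.imageio.ImageIO;

    public class JpegAlphaWriteDemo {
        public static void main(String[] args) {
            // A 4-channel (RGBA) image; JPEG encoding of the alpha channel is the
            // optional behavior described in the documentation change above.
            BufferedImage rgba = new BufferedImage(16, 16, BufferedImage.TYPE_INT_ARGB);
            try {
                boolean written = ImageIO.write(rgba, "jpeg", new File("alpha-demo.jpg"));
                System.out.println(written ? "encoded RGBA as JPEG"
                                           : "no writer accepted the image");
            } catch (IOException | RuntimeException e) {
                // When no suitable conversion is available, an exception may be
                // thrown before encoding, as the new wording allows.
                System.out.println("JPEG encoding of the RGBA image failed: " + e);
            }
        }
    }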
@@ -433,6 +454,8 @@ in the frame header node of the metadata object, regardless of color space.)
   • RGBA images:
+    Subject to the optional library support
+    described above,
     • If an app0JFIF node is present in the metadata object, it is ignored and a warning is sent to listeners, as JFIF does not
@@ -456,6 +479,8 @@ in the frame header node of the metadata object, regardless of color space.)
   • PhotoYCC Images:
+    Subject to the optional library support
+    described above,
     • If an app0JFIF node is present in the metadata object, the image is converted to sRGB, and then to YCbCr during encoding,
@@ -471,6 +496,8 @@ in the frame header node of the metadata object, regardless of color space.)
   • PhotoYCCA Images:
+    Subject to the optional library support
+    described above,
     • If an app0JFIF node is present in the metadata object, it is ignored and a warning is sent to listeners, as JFIF does not
diff --git a/jdk/src/share/classes/javax/management/loading/package.html b/jdk/src/share/classes/javax/management/loading/package.html
index 7b7ca68a99f..1cfeef73bfa 100644
--- a/jdk/src/share/classes/javax/management/loading/package.html
+++ b/jdk/src/share/classes/javax/management/loading/package.html
@@ -2,7 +2,7 @@
 javax.management.loading package