8003426: Remove UseFastAccessors and UseFastEmptyMethods except for zero

These options have long been disabled in -Xmixed mode because they prevent these small methods from being inlined and are subject to bit rot; we also don't need more macro assembler code to maintain and change if the constant pool cache format changes.

Reviewed-by: simonis, kvn
Coleen Phillimore 2014-08-12 10:48:55 -04:00
parent 8a690a1250
commit cafb36661d
31 changed files with 320 additions and 1769 deletions
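
For orientation before the per-file hunks: each port makes the same substitution, replacing its hand-rolled fast accessor and fast empty-method entries with a trivial stub that forwards to the normal (zerolocals) entry. The condensed C++ sketch below is assembled from the PPC hunks that follow (the SPARC version differs only in using jump_to with G3_scratch); it summarizes the pattern and does not appear verbatim in any one file:

// Declarations in the platform InterpreterGenerator class: accessor and
// empty kinds now reuse one forwarding stub instead of dedicated fast paths.
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void)    { return generate_jump_to_normal_entry(); }

// The stub emits a single branch to the already-generated zerolocals entry.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
  address entry = __ pc();
  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
  assert(normal_entry != NULL, "should already be generated.");
  __ branch_to_entry(normal_entry, R11_scratch1);  // PPC; SPARC uses jump_to()
  __ flush();
  return entry;
}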


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,8 +26,9 @@
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
-address generate_normal_entry(void);
-address generate_native_entry(void);
+address generate_normal_entry(bool synchronized);
+address generate_native_entry(bool synchronized);
+address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
void lock_method(void);
void unlock_method(void);


@@ -938,8 +938,9 @@ void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
//
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
if (native_entry != NULL) return native_entry;
address entry = __ pc();
@@ -1729,7 +1730,8 @@ void CppInterpreterGenerator::generate_more_monitors() {
__ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
}
-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
address entry = __ pc();
@@ -2789,38 +2791,6 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
return interpreter_frame_manager;
}
// Generate code for various sorts of method entries
//
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized : break;
case Interpreter::native : // Fall thru
case Interpreter::native_synchronized : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
case Interpreter::empty : break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
// These are special interpreter intrinsics which we don't support so far.
case Interpreter::java_lang_math_sin : break;
case Interpreter::java_lang_math_cos : break;
case Interpreter::java_lang_math_tan : break;
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_math_sqrt : break;
case Interpreter::java_lang_math_pow : break;
case Interpreter::java_lang_math_exp : break;
case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) {
return entry_point;
}
return ((InterpreterGenerator*)this)->generate_normal_entry();
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,7 +31,12 @@
private:
address generate_abstract_entry(void);
-address generate_accessor_entry(void);
+address generate_jump_to_normal_entry(void);
+address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
+// Not supported
+address generate_CRC32_update_entry() { return NULL; }
+address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP


@@ -428,6 +428,19 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
return entry;
}
+// Call an accessor method (assuming it is resolved, otherwise drop into
+// vanilla (slow path) entry.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+  assert(normal_entry != NULL, "should already be generated.");
+  __ branch_to_entry(normal_entry, R11_scratch1);
+  __ flush();
+  return entry;
+}
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
@@ -485,203 +498,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
return entry;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
return NULL;
}
Label Lslow_path, Lacquire;
const Register
Rclass_or_obj = R3_ARG1,
Rconst_method = R4_ARG2,
Rcodes = Rconst_method,
Rcpool_cache = R5_ARG3,
Rscratch = R11_scratch1,
Rjvmti_mode = Rscratch,
Roffset = R12_scratch2,
Rflags = R6_ARG4,
Rbtable = R7_ARG5;
static address branch_table[number_of_states];
address entry = __ pc();
// Check for safepoint:
// Ditch this, real man don't need safepoint checks.
// Also check for JVMTI mode
// Check for null obj, take slow path if so.
__ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
__ cmpdi(CCR1, Rclass_or_obj, 0);
__ cmpwi(CCR0, Rjvmti_mode, 0);
__ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
__ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
// Do 2 things in parallel:
// 1. Load the index out of the first instruction word, which looks like this:
// <0x2a><0xb4><index (2 byte, native endianess)>.
// 2. Load constant pool cache base.
__ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
__ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
__ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
__ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
// Get the const pool entry by means of <index>.
const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
__ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
__ add(Rcpool_cache, Rscratch, Rcpool_cache);
// Check if cpool cache entry is resolved.
// We are resolved if the indices offset contains the current bytecode.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Big Endian:
__ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
__ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
__ bne(CCR0, Lslow_path);
__ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
// Finally, start loading the value: Get cp cache entry into regs.
__ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
__ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
// Following code is from templateTable::getfield_or_static
// Load pointer to branch table
__ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
// Get volatile flag
__ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
// note: sync is needed before volatile load on PPC64
// Check field type
__ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
#ifdef ASSERT
Label LFlagInvalid;
__ cmpldi(CCR0, Rflags, number_of_states);
__ bge(CCR0, LFlagInvalid);
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x543);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
}
__ ldx(Rbtable, Rbtable, Rflags);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
}
__ mtctr(Rbtable);
__ bctr();
#ifdef ASSERT
__ bind(LFlagInvalid);
__ stop("got invalid flag", 0x6541);
bool all_uninitialized = true,
all_initialized = true;
for (int i = 0; i<number_of_states; ++i) {
all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
all_initialized = all_initialized && (branch_table[i] != NULL);
}
assert(all_uninitialized != all_initialized, "consistency"); // either or
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
__ stop("unexpected type", 0x6551);
#endif
if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
//__ dcbt(R3_RET); // prefetch
__ beq(CCR6, Lacquire);
__ blr();
}
__ align(32, 12);
__ bind(Lacquire);
__ twi_0(R3_RET);
__ isync(); // acquire
__ blr();
#ifdef ASSERT
for (int i = 0; i<number_of_states; ++i) {
assert(branch_table[i], "accessor_entry initialization");
//tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
}
#endif
__ bind(Lslow_path);
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
__ flush();
return entry;
}
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
@@ -713,7 +529,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
-// This code is based on generate_accessor_enty.
address entry = __ pc();
@@ -768,7 +583,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return entry;
} else {
-return generate_accessor_entry();
+return generate_jump_to_normal_entry();
}
}


@@ -30,7 +30,6 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
-address generate_empty_entry(void);
void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);


@@ -602,48 +602,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
// End of helpers
// ============================================================================
// Various method entries
//
// Empty method, generate a very fast return. We must skip this entry if
// someone's debugging, indicated by the flag
// "interp_mode" in the Thread obj.
// Note: empty methods are generated mostly methods that do assertions, which are
// disabled in the "java opt build".
address TemplateInterpreterGenerator::generate_empty_entry(void) {
if (!UseFastEmptyMethods) {
NOT_PRODUCT(__ should_not_reach_here();)
return Interpreter::entry_for_kind(Interpreter::zerolocals);
}
Label Lslow_path;
const Register Rjvmti_mode = R11_scratch1;
address entry = __ pc();
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
__ cmpwi(CCR0, Rjvmti_mode, 0);
__ bne(CCR0, Lslow_path); // jvmti_mode!=0
// Noone's debuggin: Simply return.
// Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
// And we're done.
__ blr();
__ bind(Lslow_path);
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();
return entry;
}
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
@@ -1289,45 +1247,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}
// =============================================================================
// Entry points
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// Determine code generation flags.
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) {
return entry_point;
}
return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1355,7 +1274,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
-// in AbstractInterpreterGenerator::generate_method_entry.
+// in InterpreterGenerator::generate_fixed_frame.
assert(Interpreter::stackElementWords == 1, "sanity");
const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
@@ -68,9 +69,7 @@ bool CppInterpreter::contains(address pc) {
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->
-Label frame_manager_entry;
-Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
-                                     // c++ interpreter entry point this holds that entry point label.
+Label frame_manager_entry; // c++ interpreter entry point this holds that entry point label.
static address unctrap_frame_manager_entry = NULL;
@@ -452,110 +451,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
return NULL;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry
// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;
if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
__ tst(Otos_i); // check if local 0 == NULL and go the slow path
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
__ delayed()->nop();
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
__ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);
// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp(G1_scratch, Bytecodes::_getfield);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);
// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
__ ba(fast_accessor_slow_entry_path);
__ delayed()->nop();
return entry;
}
return NULL;
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@@ -573,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
-return generate_accessor_entry();
+return generate_jump_to_normal_entry();
}
//
@@ -1870,23 +1765,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ba(call_interpreter_2);
__ delayed()->st_ptr(O1, STATE(_stack));
// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
// This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
// we need to do a little register fixup here once we distinguish the two of them
if (UseFastAccessorMethods && !synchronized) {
// Call stub_return address still in O7
__ bind(fast_accessor_slow_entry_path);
__ set((intptr_t)return_from_native_method - 8, Gtmp1);
__ cmp(Gtmp1, O7); // returning to interpreter?
__ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
__ delayed()->nop();
__ ba(re_dispatch);
__ delayed()->mov(G0, prevState); // initial entry
}
// interpreter returning to native code (call_stub/c1/c2)
// convert result and unwind initial activation
// L2_scratch - scaled result type index


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
-address generate_math_entry(AbstractInterpreter::MethodKind kind);
-address generate_empty_entry(void);
-address generate_accessor_entry(void);
+// there are no math intrinsics on sparc
+address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+address generate_jump_to_normal_entry(void);
+address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
@@ -43,4 +45,7 @@
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& Lcontinue);
+// Not supported
+address generate_CRC32_update_entry() { return NULL; }
+address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP


@@ -241,6 +241,15 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// Various method entries
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
+  __ jump_to(al, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
@@ -255,159 +264,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native call method.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accomadate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : break;
case Interpreter::java_lang_math_cos : break;
case Interpreter::java_lang_math_tan : break;
case Interpreter::java_lang_math_sqrt : break;
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_math_pow : break;
case Interpreter::java_lang_math_exp : break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) return entry_point;
return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation
return true;


@@ -456,6 +456,115 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.
//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accomadate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
//
@@ -599,136 +708,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
}
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// A method that does nother but return...
address entry = __ pc();
Label slow_path;
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ set(sync_state, G3_scratch);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
// Code: _return
__ retl();
__ delayed()->mov(O5_savedSP, SP);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
return NULL;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry
// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;
// XXX: for compressed oops pointer loading and decoding doesn't fit in
// delay slot and damages G1
if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
// check if local 0 == NULL and go the slow path
__ br_null_short(Otos_i, Assembler::pn, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
__ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache
__ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);
// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);
// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@@ -806,7 +785,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
-return generate_accessor_entry();
+return generate_jump_to_normal_entry();
}
//
@@ -1242,8 +1221,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Generic method entry to (asm) interpreter
-//------------------------------------------------------------------------------------------------------------------------
-//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry = __ pc();
@@ -1410,123 +1387,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native call method.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accomadate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
// Figure out the size of an interpreter frame (in words) given that we have a fully allocated


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@
protected:
#if 0
address generate_asm_interpreter_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label* do_continue);
#endif
void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,6 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
-Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
-                                     // c++ interpreter entry point this holds that entry point label.
// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@@ -660,7 +657,6 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// generate_method_entry) so the guard should work for them too.
//
-// monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -794,156 +790,6 @@ void InterpreterGenerator::lock_method(void) {
__ lock_object(monitor);
}
// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*
// rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
address entry_point = __ pc();
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi/r13: sender sp
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ movptr(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
#endif // _LP64
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notChar);
#ifdef ASSERT
Label okay;
#ifndef _LP64
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
#endif // _LP64
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
__ movl(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, sender_sp_on_entry); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
// We will enter c++ interpreter looking like it was
// called by the call_stub this will cause it to return
// a tosca result to the invoker which might have been
// the c++ interpreter itself.
__ jmp(fast_accessor_slow_entry_path);
return entry_point;
} else {
return NULL;
}
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@@ -961,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
//
@@ -1670,10 +1516,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry_point = __ pc();
// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@@ -2212,40 +2054,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) return entry_point;
return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
View file
@@ -0,0 +1,66 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#define __ _masm->
// Accessor and empty methods jump from here into the normal (zerolocals) entry.
// The old "fast" entries did not update the invocation counter, and so could
// keep these small methods, which ought to be inlined, from ever being compiled.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
return entry_point;
}
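// Note (an assumption based on the generation order in generate_all()): the
// zerolocals entry is created before any accessor or empty entry, so the
// assert above holds by the time this stub is generated.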
// Abstract method entry
// Attempts to execute an abstract method; throws AbstractMethodError.
address InterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
View file
@@ -67,45 +67,6 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}
//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender sp must set sp to this value on return
if (!UseFastEmptyMethods) return NULL;
address entry_point = __ pc();
// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, rsi);
__ jmp(rax);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
@@ -216,36 +177,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender SP
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
View file
@@ -301,66 +301,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx: Method*
// r13: sender SP
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// rbx: Method*
// r13: sender sp must set sp to this value on return
if (!UseFastEmptyMethods) {
return NULL;
}
address entry_point = __ pc();
// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
View file
@@ -38,7 +38,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// in InterpreterGenerator::generate_fixed_frame.
// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -
View file
@@ -468,10 +468,10 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rax,
// NOTE: since the additional locals are also always pushed (wasn't obvious in
// generate_method_entry) so the guard should work for them too.
// generate_fixed_frame) so the guard should work for them too.
//
// monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -633,145 +633,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
// End of helpers
//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP must be preserved for slow path, set SP to it on fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi: sender sp
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notChar);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
// This is ok for now. Since fast accessors should be going away
__ movptr(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@@ -862,7 +723,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
/**
@@ -1557,100 +1418,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native call method.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)
// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals).
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved rsi ]
// [ current rdi ]
// [ Method* ]
// [ saved rbp, ] <--- rbp,
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- rdi
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) return entry_point;
return ig_this->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
View file
@@ -400,7 +400,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// obvious in generate_fixed_frame) so the guard should work for them
// too.
//
// Args:
@@ -411,8 +411,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// rax
void InterpreterGenerator::generate_stack_overflow_check(void) {
// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp through expr stack
@@ -600,153 +599,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// End of helpers
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
// thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// rbx: method
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax: local 0
// rbx: method
// rdx: constant pool cache index
// rdi: constant pool cache
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
"adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
Label notObj, notInt, notByte, notShort;
const Address field_address(rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
__ cmpl(rdx, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ movl(rax, field_address);
__ jmp(xreturn_path);
__ bind(notInt);
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
// btos
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, ctos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif
// ctos
__ load_unsigned_short(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi);
__ mov(rsp, r13);
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
} else {
(void) generate_normal_entry(false);
}
return entry_point;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@@ -773,8 +625,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
@@ -832,7 +682,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
/**
@@ -1566,100 +1416,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
// Entry points
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native
// call method. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)
// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals).
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved r13 ]
// [ current r14 ]
// [ Method* ]
// [ saved ebp ] <--- rbp
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- r14
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) {
return entry_point;
}
return ig_this->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -831,60 +831,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals:
case Interpreter::zerolocals_synchronized:
break;
case Interpreter::native:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;
case Interpreter::native_synchronized:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;
case Interpreter::empty:
entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
break;
case Interpreter::accessor:
entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
break;
case Interpreter::abstract:
entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
break;
case Interpreter::java_lang_math_sin:
case Interpreter::java_lang_math_cos:
case Interpreter::java_lang_math_tan:
case Interpreter::java_lang_math_abs:
case Interpreter::java_lang_math_log:
case Interpreter::java_lang_math_log10:
case Interpreter::java_lang_math_sqrt:
case Interpreter::java_lang_math_pow:
case Interpreter::java_lang_math_exp:
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;
case Interpreter::java_lang_ref_reference_get:
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
break;
default:
ShouldNotReachHere();
}
if (entry_point == NULL)
entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);
return entry_point;
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -61,6 +61,12 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
define_pd_global(uintx, TypeProfileLevel, 0);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
product(bool, UseFastEmptyMethods, true, \
"Use fast method entry code for empty methods") \
\
product(bool, UseFastAccessorMethods, true, \
"Use fast method entry code for accessor methods") \
\
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,4 +39,7 @@
address generate_accessor_entry();
address generate_Reference_get_entry();
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
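// A NULL entry point makes the shared InterpreterGenerator::generate_method_entry()
// fall back to generate_normal_entry() should one of these kinds ever be requested.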
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
View file
@@ -280,9 +280,6 @@ class AbstractInterpreterGenerator: public StackObj {
address generate_result_handler_for(BasicType type);
address generate_slow_signature_handler();
// entry point generator
address generate_method_entry(AbstractInterpreter::MethodKind kind);
void bang_stack_shadow_pages(bool native_call);
void generate_all();
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@ void CppInterpreterGenerator::generate_all() {
}
#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)
{ CodeletMark cm(_masm, "(kind = frame_manager)");
// all non-native method kinds
View file
@@ -29,6 +29,7 @@
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
@@ -261,7 +262,7 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
// Special intrinsic method?
// Note: This test must come _after_ the test for native methods,
// otherwise we will run into problems with JDK 1.2, see also
// AbstractInterpreterGenerator::generate_method_entry() for
// InterpreterGenerator::generate_method_entry() for
// for details.
switch (m->intrinsic_id()) {
case vmIntrinsics::_dsin : return java_lang_math_sin ;
@@ -521,3 +522,50 @@ void AbstractInterpreterGenerator::initialize_method_handle_entries() {
Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
}
}
// Generate method entries
address InterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = generate_native_entry(true); break;
case Interpreter::empty : entry_point = generate_empty_entry(); break;
case Interpreter::accessor : entry_point = generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = generate_Reference_get_entry(); break;
#ifndef CC_INTERP
case Interpreter::java_util_zip_CRC32_update
: entry_point = generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = generate_CRC32_updateBytes_entry(kind); break;
#endif // CC_INTERP
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) {
return entry_point;
}
return generate_normal_entry(synchronized);
}
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,8 @@ class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
public:
InterpreterGenerator(StubQueue* _code);
// entry point generator
address generate_method_entry(AbstractInterpreter::MethodKind kind);
#ifdef TARGET_ARCH_x86
# include "interpreterGenerator_x86.hpp"
View file
@@ -364,7 +364,7 @@ void TemplateInterpreterGenerator::generate_all() {
#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind); \
}
// all non-native method kinds
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,9 +59,6 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_safept_entry_for(TosState state, address runtime_entry);
void generate_throw_exception();
// entry point generator
// address generate_method_entry(AbstractInterpreter::MethodKind kind);
// Instruction generation
void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -316,6 +316,7 @@ void JvmtiManageCapabilities::update() {
avail.can_generate_frame_pop_events ||
avail.can_generate_method_entry_events ||
avail.can_generate_method_exit_events;
#ifdef ZERO
bool enter_all_methods =
interp_events ||
avail.can_generate_breakpoint_events;
@@ -324,6 +325,7 @@
UseFastEmptyMethods = false;
UseFastAccessorMethods = false;
}
#endif // ZERO
if (avail.can_generate_breakpoint_events) {
RewriteFrequentPairs = false;
View file
@@ -301,6 +301,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifndef ZERO
{ "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#endif // ZERO
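// With these entries a non-Zero JDK 9 VM still accepts
// -XX:+UseFastAccessorMethods and -XX:+UseFastEmptyMethods on the command
// line, but treats them as obsolete no-ops (with a warning) rather than
// rejecting them as unknown flags.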
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@@ -1071,16 +1075,6 @@ void Arguments::set_mode_flags(Mode mode) {
UseCompiler = true;
UseLoopCounter = true;
#ifndef ZERO
// Turn these off for mixed and comp. Leave them on for Zero.
if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
UseFastAccessorMethods = (mode == _int);
}
if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
UseFastEmptyMethods = (mode == _int);
}
#endif
// Default values may be platform/compiler dependent -
// use the saved values
ClipInlining = Arguments::_ClipInlining;
View file
@@ -2784,12 +2784,6 @@ class CommandLineFlags {
product(bool, UseLoopCounter, true, \
"Increment invocation counter on backward branch") \
\
product(bool, UseFastEmptyMethods, true, \
"Use fast method entry code for empty methods") \
\
product(bool, UseFastAccessorMethods, true, \
"Use fast method entry code for accessor methods") \
\
product_pd(bool, UseOnStackReplacement, \
"Use on stack replacement, calls runtime if invoc. counter " \
"overflows in loop") \