This commit is contained in:
John R Rose 2010-06-02 22:45:42 -07:00
commit d6a9b93b5a
133 changed files with 1889 additions and 1307 deletions

View file

@ -42,8 +42,6 @@ public class CodeBlob extends VMObject {
private static CIntegerField instructionsOffsetField; private static CIntegerField instructionsOffsetField;
private static CIntegerField frameCompleteOffsetField; private static CIntegerField frameCompleteOffsetField;
private static CIntegerField dataOffsetField; private static CIntegerField dataOffsetField;
private static CIntegerField oopsOffsetField;
private static CIntegerField oopsLengthField;
private static CIntegerField frameSizeField; private static CIntegerField frameSizeField;
private static AddressField oopMapsField; private static AddressField oopMapsField;
@ -72,8 +70,6 @@ public class CodeBlob extends VMObject {
frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset"); frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
instructionsOffsetField = type.getCIntegerField("_instructions_offset"); instructionsOffsetField = type.getCIntegerField("_instructions_offset");
dataOffsetField = type.getCIntegerField("_data_offset"); dataOffsetField = type.getCIntegerField("_data_offset");
oopsOffsetField = type.getCIntegerField("_oops_offset");
oopsLengthField = type.getCIntegerField("_oops_length");
frameSizeField = type.getCIntegerField("_frame_size"); frameSizeField = type.getCIntegerField("_frame_size");
oopMapsField = type.getAddressField("_oop_maps"); oopMapsField = type.getAddressField("_oop_maps");
@ -131,19 +127,10 @@ public class CodeBlob extends VMObject {
return headerBegin().addOffsetTo(sizeField.getValue(addr)); return headerBegin().addOffsetTo(sizeField.getValue(addr));
} }
public Address oopsBegin() {
return headerBegin().addOffsetTo(oopsOffsetField.getValue(addr));
}
public Address oopsEnd() {
return oopsBegin().addOffsetTo(getOopsLength());
}
// Offsets // Offsets
public int getRelocationOffset() { return (int) headerSizeField.getValue(addr); } public int getRelocationOffset() { return (int) headerSizeField.getValue(addr); }
public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); } public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
public int getDataOffset() { return (int) dataOffsetField.getValue(addr); } public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }
public int getOopsOffset() { return (int) oopsOffsetField.getValue(addr); }
// Sizes // Sizes
public int getSize() { return (int) sizeField.getValue(addr); } public int getSize() { return (int) sizeField.getValue(addr); }
@ -157,19 +144,9 @@ public class CodeBlob extends VMObject {
// FIXME: add relocationContains // FIXME: add relocationContains
public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); } public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
public boolean dataContains(Address addr) { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); } public boolean dataContains(Address addr) { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); }
public boolean oopsContains(Address addr) { return oopsBegin().lessThanOrEqual(addr) && oopsEnd().greaterThan(addr); }
public boolean contains(Address addr) { return instructionsContains(addr); } public boolean contains(Address addr) { return instructionsContains(addr); }
public boolean isFrameCompleteAt(Address a) { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); } public boolean isFrameCompleteAt(Address a) { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
/** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
public OopHandle getOopAt(int index) {
if (index == 0) return null;
if (Assert.ASSERTS_ENABLED) {
Assert.that(index > 0 && index <= getOopsLength(), "must be a valid non-zero index");
}
return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
}
// Reclamation support (really only used by the nmethods, but in order to get asserts to work // Reclamation support (really only used by the nmethods, but in order to get asserts to work
// in the CodeCache they are defined virtual here) // in the CodeCache they are defined virtual here)
public boolean isZombie() { return false; } public boolean isZombie() { return false; }
@ -223,18 +200,8 @@ public class CodeBlob extends VMObject {
} }
protected void printComponentsOn(PrintStream tty) { protected void printComponentsOn(PrintStream tty) {
// FIXME: add relocation information
tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " + tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
" data: [" + dataBegin() + ", " + dataEnd() + "), " + " data: [" + dataBegin() + ", " + dataEnd() + "), " +
" oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
" frame size: " + getFrameSize()); " frame size: " + getFrameSize());
} }
//--------------------------------------------------------------------------------
// Internals only below this point
//
private int getOopsLength() {
return (int) oopsLengthField.getValue(addr);
}
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -49,6 +49,7 @@ public class NMethod extends CodeBlob {
private static CIntegerField deoptOffsetField; private static CIntegerField deoptOffsetField;
private static CIntegerField origPCOffsetField; private static CIntegerField origPCOffsetField;
private static CIntegerField stubOffsetField; private static CIntegerField stubOffsetField;
private static CIntegerField oopsOffsetField;
private static CIntegerField scopesDataOffsetField; private static CIntegerField scopesDataOffsetField;
private static CIntegerField scopesPCsOffsetField; private static CIntegerField scopesPCsOffsetField;
private static CIntegerField dependenciesOffsetField; private static CIntegerField dependenciesOffsetField;
@ -98,6 +99,7 @@ public class NMethod extends CodeBlob {
deoptOffsetField = type.getCIntegerField("_deoptimize_offset"); deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset"); origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
stubOffsetField = type.getCIntegerField("_stub_offset"); stubOffsetField = type.getCIntegerField("_stub_offset");
oopsOffsetField = type.getCIntegerField("_oops_offset");
scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset"); scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset");
scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset"); scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
dependenciesOffsetField = type.getCIntegerField("_dependencies_offset"); dependenciesOffsetField = type.getCIntegerField("_dependencies_offset");
@ -141,7 +143,9 @@ public class NMethod extends CodeBlob {
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); } public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); } public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); } public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address stubEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); } public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); } public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); } public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); } public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
@ -156,6 +160,7 @@ public class NMethod extends CodeBlob {
public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); } public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); }
public int codeSize() { return (int) codeEnd() .minus(codeBegin()); } public int codeSize() { return (int) codeEnd() .minus(codeBegin()); }
public int stubSize() { return (int) stubEnd() .minus(stubBegin()); } public int stubSize() { return (int) stubEnd() .minus(stubBegin()); }
public int oopsSize() { return (int) oopsEnd() .minus(oopsBegin()); }
public int scopesDataSize() { return (int) scopesDataEnd() .minus(scopesDataBegin()); } public int scopesDataSize() { return (int) scopesDataEnd() .minus(scopesDataBegin()); }
public int scopesPCsSize() { return (int) scopesPCsEnd() .minus(scopesPCsBegin()); } public int scopesPCsSize() { return (int) scopesPCsEnd() .minus(scopesPCsBegin()); }
public int dependenciesSize() { return (int) dependenciesEnd().minus(dependenciesBegin()); } public int dependenciesSize() { return (int) dependenciesEnd().minus(dependenciesBegin()); }
@ -178,6 +183,7 @@ public class NMethod extends CodeBlob {
public boolean constantsContains (Address addr) { return constantsBegin() .lessThanOrEqual(addr) && constantsEnd() .greaterThan(addr); } public boolean constantsContains (Address addr) { return constantsBegin() .lessThanOrEqual(addr) && constantsEnd() .greaterThan(addr); }
public boolean codeContains (Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); } public boolean codeContains (Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); }
public boolean stubContains (Address addr) { return stubBegin() .lessThanOrEqual(addr) && stubEnd() .greaterThan(addr); } public boolean stubContains (Address addr) { return stubBegin() .lessThanOrEqual(addr) && stubEnd() .greaterThan(addr); }
public boolean oopsContains (Address addr) { return oopsBegin() .lessThanOrEqual(addr) && oopsEnd() .greaterThan(addr); }
public boolean scopesDataContains (Address addr) { return scopesDataBegin() .lessThanOrEqual(addr) && scopesDataEnd() .greaterThan(addr); } public boolean scopesDataContains (Address addr) { return scopesDataBegin() .lessThanOrEqual(addr) && scopesDataEnd() .greaterThan(addr); }
public boolean scopesPCsContains (Address addr) { return scopesPCsBegin() .lessThanOrEqual(addr) && scopesPCsEnd() .greaterThan(addr); } public boolean scopesPCsContains (Address addr) { return scopesPCsBegin() .lessThanOrEqual(addr) && scopesPCsEnd() .greaterThan(addr); }
public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); } public boolean handlerTableContains(Address addr) { return handlerTableBegin().lessThanOrEqual(addr) && handlerTableEnd().greaterThan(addr); }
@ -187,6 +193,15 @@ public class NMethod extends CodeBlob {
public Address getEntryPoint() { return entryPointField.getValue(addr); } public Address getEntryPoint() { return entryPointField.getValue(addr); }
public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); } public Address getVerifiedEntryPoint() { return verifiedEntryPointField.getValue(addr); }
/** Support for oops in scopes and relocs. Note: index 0 is reserved for null. */
public OopHandle getOopAt(int index) {
if (index == 0) return null;
if (Assert.ASSERTS_ENABLED) {
Assert.that(index > 0 && index <= oopsSize(), "must be a valid non-zero index");
}
return oopsBegin().getOopHandleAt((index - 1) * VM.getVM().getOopSize());
}
// FIXME: add interpreter_entry_point() // FIXME: add interpreter_entry_point()
// FIXME: add lazy_interpreter_entry_point() for C2 // FIXME: add lazy_interpreter_entry_point() for C2
@ -338,6 +353,14 @@ public class NMethod extends CodeBlob {
printOn(System.out); printOn(System.out);
} }
protected void printComponentsOn(PrintStream tty) {
// FIXME: add relocation information
tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
" data: [" + dataBegin() + ", " + dataEnd() + "), " +
" oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
" frame size: " + getFrameSize());
}
public String toString() { public String toString() {
Method method = getMethod(); Method method = getMethod();
return "NMethod for " + return "NMethod for " +
@ -367,6 +390,7 @@ public class NMethod extends CodeBlob {
private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); } private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); } private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); }
private int getStubOffset() { return (int) stubOffsetField .getValue(addr); } private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); } private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); } private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }
private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); } private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }

View file

@ -73,17 +73,10 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
public CompactibleFreeListSpace(Address addr) { public CompactibleFreeListSpace(Address addr) {
super(addr); super(addr);
if ( VM.getVM().isLP64() ) { VM vm = VM.getVM();
heapWordSize = 8; heapWordSize = vm.getHeapWordSize();
IndexSetStart = 1; IndexSetStart = vm.getMinObjAlignmentInBytes() / heapWordSize;
IndexSetStride = 1; IndexSetStride = IndexSetStart;
}
else {
heapWordSize = 4;
IndexSetStart = 2;
IndexSetStride = 2;
}
IndexSetSize = 257; IndexSetSize = 257;
} }

View file

@ -128,7 +128,7 @@ public class Oop {
// Align the object size. // Align the object size.
public static long alignObjectSize(long size) { public static long alignObjectSize(long size) {
return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment()); return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
} }
// All vm's align longs, so pad out certain offsets. // All vm's align longs, so pad out certain offsets.

View file

@ -93,6 +93,7 @@ public class VM {
/** alignment constants */ /** alignment constants */
private boolean isLP64; private boolean isLP64;
private int bytesPerLong; private int bytesPerLong;
private int objectAlignmentInBytes;
private int minObjAlignmentInBytes; private int minObjAlignmentInBytes;
private int logMinObjAlignmentInBytes; private int logMinObjAlignmentInBytes;
private int heapWordSize; private int heapWordSize;
@ -313,9 +314,6 @@ public class VM {
isLP64 = debugger.getMachineDescription().isLP64(); isLP64 = debugger.getMachineDescription().isLP64();
} }
bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue(); bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
// minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
heapWordSize = db.lookupIntConstant("HeapWordSize").intValue(); heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
oopSize = db.lookupIntConstant("oopSize").intValue(); oopSize = db.lookupIntConstant("oopSize").intValue();
@ -323,6 +321,15 @@ public class VM {
uintxType = db.lookupType("uintx"); uintxType = db.lookupType("uintx");
boolType = (CIntegerType) db.lookupType("bool"); boolType = (CIntegerType) db.lookupType("bool");
minObjAlignmentInBytes = getObjectAlignmentInBytes();
if (minObjAlignmentInBytes == 8) {
logMinObjAlignmentInBytes = 3;
} else if (minObjAlignmentInBytes == 16) {
logMinObjAlignmentInBytes = 4;
} else {
throw new RuntimeException("Object alignment " + minObjAlignmentInBytes + " not yet supported");
}
if (isCompressedOopsEnabled()) { if (isCompressedOopsEnabled()) {
// Size info for oops within java objects is fixed // Size info for oops within java objects is fixed
heapOopSize = (int)getIntSize(); heapOopSize = (int)getIntSize();
@ -492,10 +499,6 @@ public class VM {
} }
/** Get minimum object alignment in bytes. */ /** Get minimum object alignment in bytes. */
public int getMinObjAlignment() {
return minObjAlignmentInBytes;
}
public int getMinObjAlignmentInBytes() { public int getMinObjAlignmentInBytes() {
return minObjAlignmentInBytes; return minObjAlignmentInBytes;
} }
@ -754,6 +757,14 @@ public class VM {
return compressedOopsEnabled.booleanValue(); return compressedOopsEnabled.booleanValue();
} }
public int getObjectAlignmentInBytes() {
if (objectAlignmentInBytes == 0) {
Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
objectAlignmentInBytes = (flag == null) ? 8 : (int)flag.getIntx();
}
return objectAlignmentInBytes;
}
// returns null, if not available. // returns null, if not available.
public Flag[] getCommandLineFlags() { public Flag[] getCommandLineFlags() {
if (commandLineFlags == null) { if (commandLineFlags == null) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,12 @@ public class PointerFinder {
} }
loc.inBlobInstructions = loc.blob.instructionsContains(a); loc.inBlobInstructions = loc.blob.instructionsContains(a);
loc.inBlobData = loc.blob.dataContains(a); loc.inBlobData = loc.blob.dataContains(a);
loc.inBlobOops = loc.blob.oopsContains(a);
if (loc.blob.isNMethod()) {
NMethod nm = (NMethod) loc.blob;
loc.inBlobOops = nm.oopsContains(a);
}
loc.inBlobUnknownLocation = (!(loc.inBlobInstructions || loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
loc.inBlobData || loc.inBlobData ||
loc.inBlobOops)); loc.inBlobOops));

View file

@ -80,13 +80,11 @@ ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
ifeq ($(ARCH),sparc)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
endif endif
endif
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar

View file

@ -87,6 +87,7 @@ REGISTER_DECLARATION(Register, Gtemp , G5);
// JSR 292 fixed register usages: // JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, G5_method_type , G5); REGISTER_DECLARATION(Register, G5_method_type , G5);
REGISTER_DECLARATION(Register, G3_method_handle , G3); REGISTER_DECLARATION(Register, G3_method_handle , G3);
REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass, // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
// because a single patchable "set" instruction (NativeMovConstReg, // because a single patchable "set" instruction (NativeMovConstReg,

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -345,6 +345,13 @@ LIR_Opr FrameMap::stack_pointer() {
} }
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
assert(L7 == L7_mh_SP_save, "must be same register");
return L7_opr;
}
bool FrameMap::validate_frame() { bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes()); int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0; int java_index = 0;

View file

@ -143,6 +143,3 @@
static bool is_caller_save_register (LIR_Opr reg); static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r); static bool is_caller_save_register (Register r);
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }

View file

@ -736,7 +736,8 @@ void LIR_Assembler::align_call(LIR_Code) {
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(op->addr(), rtype); __ call(op->addr(), rtype);
// the peephole pass fills the delay slot // The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
} }
@ -745,7 +746,8 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg); __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec); __ relocate(rspec);
__ call(op->addr(), relocInfo::none); __ call(op->addr(), relocInfo::none);
// the peephole pass fills the delay slot // The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
} }
@ -766,16 +768,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
// load with 32-bit displacement // load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) { int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset(); int load_offset = code_offset();
@ -3159,6 +3151,7 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed"); tty->print_cr("delayed");
inst->at(i - 1)->print(); inst->at(i - 1)->print();
inst->at(i)->print(); inst->at(i)->print();
tty->cr();
} }
#endif #endif
continue; continue;
@ -3174,8 +3167,8 @@ void LIR_Assembler::peephole(LIR_List* lir) {
case lir_static_call: case lir_static_call:
case lir_virtual_call: case lir_virtual_call:
case lir_icvirtual_call: case lir_icvirtual_call:
case lir_optvirtual_call: { case lir_optvirtual_call:
LIR_Op* delay_op = NULL; case lir_dynamic_call: {
LIR_Op* prev = inst->at(i - 1); LIR_Op* prev = inst->at(i - 1);
if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL && if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
(op->code() != lir_virtual_call || (op->code() != lir_virtual_call ||
@ -3192,15 +3185,14 @@ void LIR_Assembler::peephole(LIR_List* lir) {
tty->print_cr("delayed"); tty->print_cr("delayed");
inst->at(i - 1)->print(); inst->at(i - 1)->print();
inst->at(i)->print(); inst->at(i)->print();
tty->cr();
} }
#endif #endif
continue; continue;
} }
if (!delay_op) { LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
inst->insert_before(i + 1, delay_op); inst->insert_before(i + 1, delay_op);
}
break; break;
} }
} }

View file

@ -221,7 +221,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
if (needs_card_mark) { if (needs_card_mark) {
LIR_Opr ptr = new_pointer_register(); LIR_Opr ptr = new_pointer_register();
__ add(base_opr, LIR_OprFact::intptrConst(offset), ptr); __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
return new LIR_Address(ptr, 0, type); return new LIR_Address(ptr, type);
} else { } else {
return new LIR_Address(base_opr, offset, type); return new LIR_Address(base_opr, offset, type);
} }
@ -231,7 +231,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) { void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register(); LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer); __ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT); LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step); increment_counter(addr, step);
} }
@ -1159,7 +1159,7 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
if (type == T_ARRAY || type == T_OBJECT) { if (type == T_ARRAY || type == T_OBJECT) {
LIR_Opr tmp = new_pointer_register(); LIR_Opr tmp = new_pointer_register();
__ add(base_op, index_op, tmp); __ add(base_op, index_op, tmp);
addr = new LIR_Address(tmp, 0, type); addr = new LIR_Address(tmp, type);
} else { } else {
addr = new LIR_Address(base_op, index_op, type); addr = new LIR_Address(base_op, index_op, type);
} }

View file

@ -679,8 +679,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
G2_thread, Oissuing_pc->after_save()); G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save()); __ verify_not_null_oop(Oexception->after_save());
__ jmp(O0, 0);
__ delayed()->restore(); // Restore SP from L7 if the exception PC is a MethodHandle call site.
__ mov(O0, G5); // Save the target address.
__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
__ tst(L0); // Condition codes are preserved over the restore.
__ restore();
__ jmp(G5, 0);
__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
} }
break; break;

View file

@ -154,7 +154,7 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
} }
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) { static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
assert(MinObjAlignmentInBytes == BytesPerLong, "need alternate implementation"); assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
julong* to = (julong*)tohw; julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value; julong v = ((julong)value << 32) | value;
@ -162,7 +162,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
// and be equal to 0 on 64-bit platform. // and be equal to 0 on 64-bit platform.
size_t odd = count % (BytesPerLong / HeapWordSize) ; size_t odd = count % (BytesPerLong / HeapWordSize) ;
size_t aligned_count = align_object_size(count - odd) / HeapWordsPerLong; size_t aligned_count = align_object_offset(count - odd) / HeapWordsPerLong;
julong* end = ((julong*)tohw) + aligned_count - 1; julong* end = ((julong*)tohw) + aligned_count - 1;
while (to <= end) { while (to <= end) {
DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;) DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;)

View file

@ -336,9 +336,11 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#endif // ASSERT #endif // ASSERT
} }
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_stack) { frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
_sp = sp; _sp(sp),
_younger_sp = younger_sp; _younger_sp(younger_sp),
_deopt_state(unknown),
_sp_adjustment_by_callee(0) {
if (younger_sp == NULL) { if (younger_sp == NULL) {
// make a deficient frame which doesn't know where its PC is // make a deficient frame which doesn't know where its PC is
_pc = NULL; _pc = NULL;
@ -352,20 +354,32 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_adjusted_sta
// wrong. (the _last_native_pc will have the right value) // wrong. (the _last_native_pc will have the right value)
// So do not put add any asserts on the _pc here. // So do not put add any asserts on the _pc here.
} }
if (younger_frame_adjusted_stack) {
// compute adjustment to this frame's SP made by its interpreted callee if (_pc != NULL)
_sp_adjustment_by_callee = (intptr_t*)((intptr_t)younger_sp[I5_savedSP->sp_offset_in_saved_window()] + _cb = CodeCache::find_blob(_pc);
STACK_BIAS) - sp;
} else { // Check for MethodHandle call sites.
_sp_adjustment_by_callee = 0; if (_cb != NULL) {
nmethod* nm = _cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
// The SP is already adjusted by this MH call site, don't
// overwrite this value with the wrong interpreter value.
younger_frame_is_interpreted = false;
}
}
} }
_deopt_state = unknown; if (younger_frame_is_interpreted) {
// compute adjustment to this frame's SP made by its interpreted callee
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
}
// It is important that frame be fully construct when we do this lookup // It is important that the frame is fully constructed when we do
// as get_original_pc() needs correct value for unextended_sp() // this lookup as get_deopt_original_pc() needs a correct value for
// unextended_sp() which uses _sp_adjustment_by_callee.
if (_pc != NULL) { if (_pc != NULL) {
_cb = CodeCache::find_blob(_pc);
address original_pc = nmethod::get_deopt_original_pc(this); address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != NULL) {
_pc = original_pc; _pc = original_pc;
@ -464,7 +478,6 @@ frame frame::sender(RegisterMap* map) const {
intptr_t* younger_sp = sp(); intptr_t* younger_sp = sp();
intptr_t* sp = sender_sp(); intptr_t* sp = sender_sp();
bool adjusted_stack = false;
// Note: The version of this operation on any platform with callee-save // Note: The version of this operation on any platform with callee-save
// registers must update the register map (if not null). // registers must update the register map (if not null).
@ -483,8 +496,8 @@ frame frame::sender(RegisterMap* map) const {
// interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
// explicitly recognized. // explicitly recognized.
adjusted_stack = is_interpreted_frame(); bool frame_is_interpreted = is_interpreted_frame();
if (adjusted_stack) { if (frame_is_interpreted) {
map->make_integer_regs_unsaved(); map->make_integer_regs_unsaved();
map->shift_window(sp, younger_sp); map->shift_window(sp, younger_sp);
} else if (_cb != NULL) { } else if (_cb != NULL) {
@ -503,7 +516,7 @@ frame frame::sender(RegisterMap* map) const {
} }
} }
} }
return frame(sp, younger_sp, adjusted_stack); return frame(sp, younger_sp, frame_is_interpreted);
} }

View file

@ -720,25 +720,30 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp, void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) { int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) { if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else { } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
get_4_byte_integer_at_bcp(bcp_offset, cache, tmp); get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
xor3(tmp, -1, tmp); // convert to plain index xor3(tmp, -1, tmp); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
ldub(Lbcp, bcp_offset, tmp);
} else {
ShouldNotReachHere();
} }
} }
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) { int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp); assert_different_registers(cache, tmp);
assert_not_delayed(); assert_not_delayed();
get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index); get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
// convert from field index to ConstantPoolCacheEntry index and from // convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset // word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
@ -747,12 +752,15 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) { int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, tmp); assert_different_registers(cache, tmp);
assert_not_delayed(); assert_not_delayed();
assert(!giant_index,"NYI"); if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
} else {
ShouldNotReachHere(); // other sizes not supported here
}
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset // and from word index to byte offset
sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);

View file

@ -182,9 +182,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register Rdst, Register Rdst,
setCCOrNot should_set_CC = dont_set_CC ); setCCOrNot should_set_CC = dont_set_CC );
void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false); void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false); void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
// common code // common code

View file

@ -375,10 +375,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register O0_scratch = O0_argslot; Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize; int stackElementSize = Interpreter::stackElementSize;
// Make space on the stack for the arguments. // Make space on the stack for the arguments and set Gargs
__ sub(SP, 4*stackElementSize, SP); // correctly.
__ sub(Gargs, 3*stackElementSize, Gargs); __ sub(SP, 4*stackElementSize, SP); // Keep stack aligned.
//__ sub(Lesp, 3*stackElementSize, Lesp); __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);
// void raiseException(int code, Object actual, Object required) // void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code __ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -321,7 +321,8 @@ void NativeMovConstReg::set_data(intptr_t x) {
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x)); set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any // also store the value into an oop_Relocation cell, if any
CodeBlob* nm = CodeCache::find_blob(instruction_address()); CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) { if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address()); RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL; oop* oop_addr = NULL;
@ -430,7 +431,8 @@ void NativeMovConstRegPatching::set_data(int x) {
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x)); set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any // also store the value into an oop_Relocation cell, if any
CodeBlob* nm = CodeCache::find_blob(instruction_address()); CodeBlob* cb = CodeCache::find_blob(instruction_address());
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) { if (nm != NULL) {
RelocIterator iter(nm, instruction_address(), next_instruction_address()); RelocIterator iter(nm, instruction_address(), next_instruction_address());
oop* oop_addr = NULL; oop* oop_addr = NULL;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -142,9 +142,12 @@ REGISTER_DEFINITION(Register, G1_scratch);
REGISTER_DEFINITION(Register, G3_scratch); REGISTER_DEFINITION(Register, G3_scratch);
REGISTER_DEFINITION(Register, G4_scratch); REGISTER_DEFINITION(Register, G4_scratch);
REGISTER_DEFINITION(Register, Gtemp); REGISTER_DEFINITION(Register, Gtemp);
REGISTER_DEFINITION(Register, Lentry_args);
// JSR 292
REGISTER_DEFINITION(Register, G5_method_type); REGISTER_DEFINITION(Register, G5_method_type);
REGISTER_DEFINITION(Register, G3_method_handle); REGISTER_DEFINITION(Register, G3_method_handle);
REGISTER_DEFINITION(Register, Lentry_args); REGISTER_DEFINITION(Register, L7_mh_SP_save);
#ifdef CC_INTERP #ifdef CC_INTERP
REGISTER_DEFINITION(Register, Lstate); REGISTER_DEFINITION(Register, Lstate);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -116,6 +116,11 @@ void OptoRuntime::generate_exception_blob() {
__ mov(O0, G3_scratch); // Move handler address to temp __ mov(O0, G3_scratch); // Move handler address to temp
__ restore(); __ restore();
// Restore SP from L7 if the exception PC is a MethodHandle call site.
__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
__ tst(O7);
__ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);
// G3_scratch contains handler address // G3_scratch contains handler address
// Since this may be the deopt blob we must set O7 to look like we returned // Since this may be the deopt blob we must set O7 to look like we returned
// from the original pc that threw the exception // from the original pc that threw the exception

View file

@ -915,19 +915,6 @@ void AdapterGenerator::gen_i2c_adapter(
// Gargs is the incoming argument base, and also an outgoing argument. // Gargs is the incoming argument base, and also an outgoing argument.
__ sub(Gargs, BytesPerWord, Gargs); __ sub(Gargs, BytesPerWord, Gargs);
#ifdef ASSERT
{
// on entry OsavedSP and SP should be equal
Label ok;
__ cmp(O5_savedSP, SP);
__ br(Assembler::equal, false, Assembler::pt, ok);
__ delayed()->nop();
__ stop("I5_savedSP not set");
__ should_not_reach_here();
__ bind(ok);
}
#endif
// ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
// WITH O7 HOLDING A VALID RETURN PC // WITH O7 HOLDING A VALID RETURN PC
// //

View file

@ -534,7 +534,10 @@ bool can_branch_register( Node *bol, Node *cmp ) {
// The "return address" is the address of the call instruction, plus 8. // The "return address" is the address of the call instruction, plus 8.
int MachCallStaticJavaNode::ret_addr_offset() { int MachCallStaticJavaNode::ret_addr_offset() {
return NativeCall::instruction_size; // call; delay slot int offset = NativeCall::instruction_size; // call; delay slot
if (_method_handle_invoke)
offset += 4; // restore SP
return offset;
} }
int MachCallDynamicJavaNode::ret_addr_offset() { int MachCallDynamicJavaNode::ret_addr_offset() {
@ -818,6 +821,10 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) && !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
!(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) && !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) && !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) &&
!(n->rule() == loadUB_rule)) { !(n->rule() == loadUB_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), ld_op); verify_oops_warning(n, n->ideal_Opcode(), ld_op);
} }
@ -829,6 +836,9 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) && !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
!(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) && !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
!(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) && !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
!(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
!(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) { !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
verify_oops_warning(n, n->ideal_Opcode(), st_op); verify_oops_warning(n, n->ideal_Opcode(), st_op);
} }
@ -1750,6 +1760,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs // registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false; const bool Matcher::clone_shift_expressions = false;
bool Matcher::narrow_oop_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedOops, "only for compressed oops code");
return false;
}
// Is it better to copy float constants, or load them directly from memory? // Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no // Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a // extra registers. Most RISCs will have to materialize an address into a
@ -1858,7 +1874,7 @@ RegMask Matcher::modL_proj_mask() {
} }
const RegMask Matcher::method_handle_invoke_SP_save_mask() { const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return RegMask(); return L7_REGP_mask;
} }
%} %}
@ -2441,6 +2457,16 @@ encode %{
/*preserve_g2=*/true, /*force far call*/true); /*preserve_g2=*/true, /*force far call*/true);
%} %}
enc_class preserve_SP %{
MacroAssembler _masm(&cbuf);
__ mov(SP, L7_mh_SP_save);
%}
enc_class restore_SP %{
MacroAssembler _masm(&cbuf);
__ mov(L7_mh_SP_save, SP);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call. // who we intended to call.
@ -9213,6 +9239,7 @@ instruct safePoint_poll(iRegP poll) %{
// Call Java Static Instruction // Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{ instruct CallStaticJavaDirect( method meth ) %{
match(CallStaticJava); match(CallStaticJava);
predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth); effect(USE meth);
size(8); size(8);
@ -9223,6 +9250,20 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_pipe(simple_call); ins_pipe(simple_call);
%} %}
// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth, KILL l7_mh_SP_save);
size(8);
ins_cost(CALL_COST);
format %{ "CALL,static/MethodHandle" %}
ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
ins_pc_relative(1);
ins_pipe(simple_call);
%}
// Call Java Dynamic Instruction // Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{ instruct CallDynamicJavaDirect( method meth ) %{
match(CallDynamicJava); match(CallDynamicJava);

View file

@ -2911,16 +2911,6 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers // arraycopy stubs used by compilers
generate_arraycopy_stubs(); generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
// Don't initialize the platform math functions since sparc // Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations. // doesn't have intrinsics for these operations.
} }

View file

@ -43,7 +43,7 @@ enum /* platform_dependent_constants */ {
// MethodHandles adapters // MethodHandles adapters
enum method_handles_platform_dependent_constants { enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000 method_handles_adapters_code_size = 6000
}; };
class Sparc { class Sparc {

View file

@ -204,7 +204,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code... // out of the main line of code...
if (EnableInvokeDynamic) { if (EnableInvokeDynamic) {
__ bind(L_giant_index); __ bind(L_giant_index);
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true); __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
__ ba(false, L_got_cache); __ ba(false, L_got_cache);
__ delayed()->nop(); __ delayed()->nop();
} }

View file

@ -1949,23 +1949,30 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
} }
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { void TemplateTable::resolve_cache_and_index(int byte_no,
assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); Register result,
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic); Register Rcache,
Register index,
size_t index_size) {
// Depends on cpCacheOop layout! // Depends on cpCacheOop layout!
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved; Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (is_invokedynamic) { if (byte_no == f1_oop) {
// We are resolved if the f1 field contains a non-null CallSite object. // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert_different_registers(result, Rcache);
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), Lbyte_code); ConstantPoolCacheEntry::f1_offset(), result);
__ tst(Lbyte_code); __ tst(result);
__ br(Assembler::notEqual, false, Assembler::pt, resolved); __ br(Assembler::notEqual, false, Assembler::pt, resolved);
__ delayed()->set((int)bytecode(), O1); __ delayed()->set((int)bytecode(), O1);
} else { } else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no)*BitsPerByte;
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset(), Lbyte_code); ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
@ -1992,7 +1999,10 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
// first time invocation - must resolve first // first time invocation - must resolve first
__ call_VM(noreg, entry, O1); __ call_VM(noreg, entry, O1);
// Update registers with resolved info // Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset(), result);
__ bind(resolved); __ bind(resolved);
} }
@ -2001,7 +2011,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register Ritable_index, Register Ritable_index,
Register Rflags, Register Rflags,
bool is_invokevirtual, bool is_invokevirtual,
bool is_invokevfinal) { bool is_invokevfinal,
bool is_invokedynamic) {
// Uses both G3_scratch and G4_scratch // Uses both G3_scratch and G4_scratch
Register Rcache = G3_scratch; Register Rcache = G3_scratch;
Register Rscratch = G4_scratch; Register Rscratch = G4_scratch;
@ -2025,11 +2036,15 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
if (is_invokevfinal) { if (is_invokevfinal) {
__ get_cache_and_index_at_bcp(Rcache, Rscratch, 1); __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
__ ld_ptr(Rcache, method_offset, Rmethod);
} else if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
} else { } else {
resolve_cache_and_index(byte_no, Rcache, Rscratch); resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
__ ld_ptr(Rcache, method_offset, Rmethod);
} }
__ ld_ptr(Rcache, method_offset, Rmethod);
if (Ritable_index != noreg) { if (Ritable_index != noreg) {
__ ld_ptr(Rcache, index_offset, Ritable_index); __ ld_ptr(Rcache, index_offset, Ritable_index);
} }
@ -2110,7 +2125,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch; Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
resolve_cache_and_index(byte_no, Rcache, index); resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_access(Rcache, index, is_static, false); jvmti_post_field_access(Rcache, index, is_static, false);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@ -2475,7 +2490,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Register Rflags = G1_scratch; Register Rflags = G1_scratch;
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
resolve_cache_and_index(byte_no, Rcache, index); resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
jvmti_post_field_mod(Rcache, index, is_static); jvmti_post_field_mod(Rcache, index, is_static);
load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
@ -2816,6 +2831,7 @@ void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Regist
void TemplateTable::invokevirtual(int byte_no) { void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
Register Rscratch = G3_scratch; Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch; Register Rtemp = G4_scratch;
@ -2823,7 +2839,7 @@ void TemplateTable::invokevirtual(int byte_no) {
Register Rrecv = G5_method; Register Rrecv = G5_method;
Label notFinal; Label notFinal;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true); load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// Check for vfinal // Check for vfinal
@ -2864,9 +2880,10 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) { void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
/*is_invokevfinal*/true); /*is_invokevfinal*/true, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
invokevfinal_helper(G3_scratch, Lscratch); invokevfinal_helper(G3_scratch, Lscratch);
} }
@ -2901,12 +2918,13 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
void TemplateTable::invokespecial(int byte_no) { void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch; Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch; Register Rtemp = G4_scratch;
Register Rret = Lscratch; Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false); load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method); __ verify_oop(G5_method);
@ -2934,12 +2952,13 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) { void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G3_scratch; Register Rscratch = G3_scratch;
Register Rtemp = G4_scratch; Register Rtemp = G4_scratch;
Register Rret = Lscratch; Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false); load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method); __ verify_oop(G5_method);
@ -2992,6 +3011,7 @@ void TemplateTable::invokeinterface_object_method(Register RklassOop,
void TemplateTable::invokeinterface(int byte_no) { void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
Register Rscratch = G4_scratch; Register Rscratch = G4_scratch;
Register Rret = G3_scratch; Register Rret = G3_scratch;
@ -3001,7 +3021,7 @@ void TemplateTable::invokeinterface(int byte_no) {
Register Rflags = O1; Register Rflags = O1;
assert_different_registers(Rscratch, G5_method); assert_different_registers(Rscratch, G5_method);
load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false); load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// get receiver // get receiver
@ -3118,6 +3138,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) { void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) { if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic. // We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3132,7 +3153,6 @@ void TemplateTable::invokedynamic(int byte_no) {
// G5: CallSite object (f1) // G5: CallSite object (f1)
// XX: unused (f2) // XX: unused (f2)
// G3: receiver address
// XX: flags (unused) // XX: flags (unused)
Register G5_callsite = G5_method; Register G5_callsite = G5_method;
@ -3140,7 +3160,8 @@ void TemplateTable::invokedynamic(int byte_no) {
Register Rtemp = G1_scratch; Register Rtemp = G1_scratch;
Register Rret = Lscratch; Register Rret = Lscratch;
load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false); load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
/*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_callsite); __ verify_oop(G5_callsite);

View file

@ -65,13 +65,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseInlineCaches, false); FLAG_SET_DEFAULT(UseInlineCaches, false);
} }
#ifdef _LP64 #ifdef _LP64
// Single issue niagara1 is slower for CompressedOops
// but niagaras after that it's fine.
if (!is_niagara1_plus()) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
FLAG_SET_ERGO(bool, UseCompressedOops, false);
}
}
// 32-bit oops don't make sense for the 64-bit VM on sparc // 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects. // since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

View file

@ -8185,9 +8185,14 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != NULL, "java heap should be initialized");
movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
if (Universe::narrow_oop_shift() != 0) { if (Universe::narrow_oop_shift() != 0) {
assert(Address::times_8 == LogMinObjAlignmentInBytes && assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); if (LogMinObjAlignmentInBytes == Address::times_8) {
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
} else {
// OK to use shift since we don't need to preserve flags.
shlq(dst, LogMinObjAlignmentInBytes);
movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
} else { } else {
movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
} }
@ -8361,31 +8366,43 @@ void MacroAssembler::decode_heap_oop(Register r) {
} }
void MacroAssembler::decode_heap_oop_not_null(Register r) { void MacroAssembler::decode_heap_oop_not_null(Register r) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) { if (Universe::narrow_oop_shift() != 0) {
assert (Address::times_8 == LogMinObjAlignmentInBytes && assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); shlq(r, LogMinObjAlignmentInBytes);
// Don't use Shift since it modifies flags. if (Universe::narrow_oop_base() != NULL) {
leaq(r, Address(r12_heapbase, r, Address::times_8, 0)); addq(r, r12_heapbase);
}
} else { } else {
assert (Universe::narrow_oop_base() == NULL, "sanity"); assert (Universe::narrow_oop_base() == NULL, "sanity");
} }
} }
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
// Note: it will change flags
assert (UseCompressedOops, "should only be used for compressed headers"); assert (UseCompressedOops, "should only be used for compressed headers");
assert (Universe::heap() != NULL, "java heap should be initialized"); assert (Universe::heap() != NULL, "java heap should be initialized");
// Cannot assert, unverified entry point counts instructions (see .ad file) // Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit. // vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop. // Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) { if (Universe::narrow_oop_shift() != 0) {
assert (Address::times_8 == LogMinObjAlignmentInBytes && assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); if (LogMinObjAlignmentInBytes == Address::times_8) {
leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
} else {
if (dst != src) {
movq(dst, src);
}
shlq(dst, LogMinObjAlignmentInBytes);
if (Universe::narrow_oop_base() != NULL) {
addq(dst, r12_heapbase);
}
}
} else if (dst != src) { } else if (dst != src) {
assert (Universe::narrow_oop_base() == NULL, "sanity"); assert (Universe::narrow_oop_base() == NULL, "sanity");
movq(dst, src); movq(dst, src);

View file

@ -135,6 +135,9 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64 #endif // _LP64
// JSR 292 fixed register usages:
REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
// Address is an abstraction used to represent a memory location // Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object. // using any of the amd64 addressing modes with one object.
// //

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -309,6 +309,13 @@ LIR_Opr FrameMap::stack_pointer() {
} }
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
assert(rbp == rbp_mh_SP_save, "must be same register");
return rbp_opr;
}
bool FrameMap::validate_frame() { bool FrameMap::validate_frame() {
return true; return true;
} }

View file

@ -126,6 +126,3 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds"); assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i]; return _caller_save_xmm_regs[i];
} }
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }

View file

@ -2462,9 +2462,18 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
} }
#endif // _LP64 #endif // _LP64
} else { } else {
#ifdef _LP64
Register r_lo;
if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
r_lo = right->as_register();
} else {
r_lo = right->as_register_lo();
}
#else
Register r_lo = right->as_register_lo(); Register r_lo = right->as_register_lo();
Register r_hi = right->as_register_hi(); Register r_hi = right->as_register_hi();
assert(l_lo != r_hi, "overwriting registers"); assert(l_lo != r_hi, "overwriting registers");
#endif
switch (code) { switch (code) {
case lir_logic_and: case lir_logic_and:
__ andptr(l_lo, r_lo); __ andptr(l_lo, r_lo);
@ -2784,7 +2793,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(op->addr(), rtype)); __ call(AddressLiteral(op->addr(), rtype));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke()); add_call_info(code_offset(), op->info());
} }
@ -2795,7 +2804,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(op->addr(), rh)); __ call(AddressLiteral(op->addr(), rh));
add_call_info(code_offset(), op->info(), op->is_method_handle_invoke()); add_call_info(code_offset(), op->info());
} }
@ -2805,16 +2814,6 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
__ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
__ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
}
void LIR_Assembler::emit_static_call_stub() { void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc(); address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size); address stub = __ start_a_stub(call_stub_size);

View file

@ -175,7 +175,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
// store and again for the card mark. // store and again for the card mark.
LIR_Opr tmp = new_pointer_register(); LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp); __ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, 0, type); return new LIR_Address(tmp, type);
} else { } else {
return addr; return addr;
} }
@ -185,7 +185,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) { void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr pointer = new_pointer_register(); LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer); __ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT); LIR_Address* addr = new LIR_Address(pointer, T_INT);
increment_counter(addr, step); increment_counter(addr, step);
} }

View file

@ -782,7 +782,7 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// Restore SP from BP if the exception PC is a MethodHandle call site. // Restore SP from BP if the exception PC is a MethodHandle call site.
NOT_LP64(__ get_thread(thread);) NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp); __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// continue at exception handler (return address removed) // continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the // note: do *not* remove arguments when unwinding the
@ -1581,7 +1581,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ should_not_reach_here(); __ should_not_reach_here();
break; break;
} }
__ push(rax); __ push(rax);
__ push(rdx); __ push(rdx);
@ -1605,8 +1604,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// Can we store original value in the thread's buffer? // Can we store original value in the thread's buffer?
LP64_ONLY(__ movslq(tmp, queue_index);)
#ifdef _LP64 #ifdef _LP64
__ movslq(tmp, queue_index);
__ cmpq(tmp, 0); __ cmpq(tmp, 0);
#else #else
__ cmpl(queue_index, 0); __ cmpl(queue_index, 0);
@ -1628,13 +1627,33 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done); __ jmp(done);
__ bind(runtime); __ bind(runtime);
// load the pre-value
__ push(rcx); __ push(rcx);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif
// load the pre-value
f.load_argument(0, rcx); f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
__ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif
__ pop(rcx); __ pop(rcx);
__ bind(done); __ bind(done);
__ pop(rdx); __ pop(rdx);
__ pop(rax); __ pop(rax);
} }
@ -1664,13 +1683,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
PtrQueue::byte_offset_of_buf())); PtrQueue::byte_offset_of_buf()));
__ push(rax); __ push(rax);
__ push(rdx); __ push(rcx);
NOT_LP64(__ get_thread(thread);) NOT_LP64(__ get_thread(thread);)
ExternalAddress cardtable((address)ct->byte_map_base); ExternalAddress cardtable((address)ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
const Register card_addr = rdx; const Register card_addr = rcx;
#ifdef _LP64 #ifdef _LP64
const Register tmp = rscratch1; const Register tmp = rscratch1;
f.load_argument(0, card_addr); f.load_argument(0, card_addr);
@ -1679,7 +1698,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the address of the card // get the address of the card
__ addq(card_addr, tmp); __ addq(card_addr, tmp);
#else #else
const Register card_index = rdx; const Register card_index = rcx;
f.load_argument(0, card_index); f.load_argument(0, card_index);
__ shrl(card_index, CardTableModRefBS::card_shift); __ shrl(card_index, CardTableModRefBS::card_shift);
@ -1716,12 +1735,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done); __ jmp(done);
__ bind(runtime); __ bind(runtime);
NOT_LP64(__ push(rcx);) __ push(rdx);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
NOT_LP64(__ pop(rcx);) #ifdef _LP64
# ifndef _WIN64
__ bind(done); __ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif
__ pop(rdx); __ pop(rdx);
__ bind(done);
__ pop(rcx);
__ pop(rax); __ pop(rax);
} }

View file

@ -189,11 +189,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
} }
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) { void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) { if (index_size == sizeof(u2)) {
load_unsigned_short(reg, Address(rsi, bcp_offset)); load_unsigned_short(reg, Address(rsi, bcp_offset));
} else { } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset)); movl(reg, Address(rsi, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise // Check if the secondary index definition is still ~x, otherwise
@ -201,14 +201,19 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
// plain index. // plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index notl(reg); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
load_unsigned_byte(reg, Address(rsi, bcp_offset));
} else {
ShouldNotReachHere();
} }
} }
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset, bool giant_index) { int bcp_offset, size_t index_size) {
assert(cache != index, "must use different registers"); assert(cache != index, "must use different registers");
get_cache_index_at_bcp(index, bcp_offset, giant_index); get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
@ -216,9 +221,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index) { int bcp_offset, size_t index_size) {
assert(cache != tmp, "must use different register"); assert(cache != tmp, "must use different register");
get_cache_index_at_bcp(tmp, bcp_offset, giant_index); get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset // and from word offset to byte offset

View file

@ -76,9 +76,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
} }
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false); void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
// Expression stack // Expression stack
void f2ieee(); // truncate ftos to 32bits void f2ieee(); // truncate ftos to 32bits

View file

@ -187,11 +187,11 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
int bcp_offset, int bcp_offset,
bool giant_index) { size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) { if (index_size == sizeof(u2)) {
load_unsigned_short(index, Address(r13, bcp_offset)); load_unsigned_short(index, Address(r13, bcp_offset));
} else { } else if (index_size == sizeof(u4)) {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(index, Address(r13, bcp_offset)); movl(index, Address(r13, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise // Check if the secondary index definition is still ~x, otherwise
@ -199,6 +199,11 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
// plain index. // plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(index); // convert to plain index notl(index); // convert to plain index
} else if (index_size == sizeof(u1)) {
assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
load_unsigned_byte(index, Address(r13, bcp_offset));
} else {
ShouldNotReachHere();
} }
} }
@ -206,9 +211,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index, Register index,
int bcp_offset, int bcp_offset,
bool giant_index) { size_t index_size) {
assert(cache != index, "must use different registers"); assert(cache != index, "must use different registers");
get_cache_index_at_bcp(index, bcp_offset, giant_index); get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
@ -219,9 +224,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp, Register tmp,
int bcp_offset, int bcp_offset,
bool giant_index) { size_t index_size) {
assert(cache != tmp, "must use different register"); assert(cache != tmp, "must use different register");
get_cache_index_at_bcp(tmp, bcp_offset, giant_index); get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset // and from word offset to byte offset

View file

@ -95,10 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, void get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset, bool giant_index = false); int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset, bool giant_index = false); int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void pop_ptr(Register r = rax); void pop_ptr(Register r = rax);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -115,3 +115,6 @@ REGISTER_DEFINITION(MMXRegister, mmx4 );
REGISTER_DEFINITION(MMXRegister, mmx5 ); REGISTER_DEFINITION(MMXRegister, mmx5 );
REGISTER_DEFINITION(MMXRegister, mmx6 ); REGISTER_DEFINITION(MMXRegister, mmx6 );
REGISTER_DEFINITION(MMXRegister, mmx7 ); REGISTER_DEFINITION(MMXRegister, mmx7 );
// JSR 292
REGISTER_DEFINITION(Register, rbp_mh_SP_save);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -117,7 +117,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site. // Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0); __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp); __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax, (could be deopt blob) // We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it. // rdx - throwing pc, deopt blob will need it.

View file

@ -3305,7 +3305,7 @@ void OptoRuntime::generate_exception_blob() {
// Restore SP from BP if the exception PC is a MethodHandle call site. // Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0); __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp); __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
// We have a handler in rax (could be deopt blob). // We have a handler in rax (could be deopt blob).
__ mov(r8, rax); __ mov(r8, rax);

View file

@ -214,7 +214,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic); __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index); __ jcc(Assembler::equal, L_giant_index);
} }
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false); __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache); __ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx, __ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() + Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
@ -226,7 +226,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code... // out of the main line of code...
if (EnableInvokeDynamic) { if (EnableInvokeDynamic) {
__ bind(L_giant_index); __ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true); __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache); __ jmp(L_got_cache);
} }

View file

@ -192,7 +192,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ cmpb(Address(r13, 0), Bytecodes::_invokedynamic); __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index); __ jcc(Assembler::equal, L_giant_index);
} }
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false); __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache); __ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx, __ movl(rbx, Address(rbx, rcx,
Address::times_ptr, Address::times_ptr,
@ -205,7 +205,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// out of the main line of code... // out of the main line of code...
if (EnableInvokeDynamic) { if (EnableInvokeDynamic) {
__ bind(L_giant_index); __ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true); __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
__ jmp(L_got_cache); __ jmp(L_got_cache);
} }

View file

@ -2012,22 +2012,29 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
__ membar(order_constraint); __ membar(order_constraint);
} }
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { void TemplateTable::resolve_cache_and_index(int byte_no,
assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); Register result,
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic); Register Rcache,
Register index,
size_t index_size) {
Register temp = rbx; Register temp = rbx;
assert_different_registers(Rcache, index, temp); assert_different_registers(result, Rcache, index, temp);
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved; Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (is_invokedynamic) { if (byte_no == f1_oop) {
// we are resolved if the f1 field contains a non-null CallSite object // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
__ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD); // This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ testptr(result, result);
__ jcc(Assembler::notEqual, resolved); __ jcc(Assembler::notEqual, resolved);
} else { } else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no)*BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count); __ shrl(temp, shift_count);
// have we resolved this bytecode? // have we resolved this bytecode?
@ -2053,7 +2060,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ movl(temp, (int)bytecode()); __ movl(temp, (int)bytecode());
__ call_VM(noreg, entry, temp); __ call_VM(noreg, entry, temp);
// Update registers with resolved info // Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved); __ bind(resolved);
} }
@ -2087,7 +2096,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index, Register itable_index,
Register flags, Register flags,
bool is_invokevirtual, bool is_invokevirtual,
bool is_invokevfinal /*unused*/) { bool is_invokevfinal /*unused*/,
bool is_invokedynamic) {
// setup registers // setup registers
const Register cache = rcx; const Register cache = rcx;
const Register index = rdx; const Register index = rdx;
@ -2109,9 +2119,14 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset()); ConstantPoolCacheEntry::f2_offset());
resolve_cache_and_index(byte_no, cache, index); if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(is_invokedynamic, "");
resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
} else {
resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
}
if (itable_index != noreg) { if (itable_index != noreg) {
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
} }
@ -2169,7 +2184,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register off = rbx; const Register off = rbx;
const Register flags = rax; const Register flags = rax;
resolve_cache_and_index(byte_no, cache, index); resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false); jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2378,7 +2393,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register off = rbx; const Register off = rbx;
const Register flags = rax; const Register flags = rax;
resolve_cache_and_index(byte_no, cache, index); resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static); jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2815,10 +2830,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address' // save 'interpreter return address'
__ save_bcp(); __ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual); load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet) // load receiver if needed (note: no return address pushed yet)
if (load_receiver) { if (load_receiver) {
assert(!is_invokedynamic, "");
__ movl(recv, flags); __ movl(recv, flags);
__ andl(recv, 0xFF); __ andl(recv, 0xFF);
// recv count is 0 based? // recv count is 0 based?
@ -2910,6 +2926,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
void TemplateTable::invokevirtual(int byte_no) { void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// rbx,: index // rbx,: index
@ -2922,6 +2939,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) { void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
@ -2932,6 +2950,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) { void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
@ -2942,12 +2961,14 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) { void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on x86"); __ stop("fast_invokevfinal not used on x86");
} }
void TemplateTable::invokeinterface(int byte_no) { void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no); prepare_invoke(rax, rbx, byte_no);
// rax,: Interface // rax,: Interface
@ -3036,11 +3057,11 @@ void TemplateTable::invokedynamic(int byte_no) {
return; return;
} }
assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no); prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1) // rax: CallSite object (f1)
// rbx: unused (f2) // rbx: unused (f2)
// rcx: receiver address
// rdx: flags (unused) // rdx: flags (unused)
if (ProfileInterpreter) { if (ProfileInterpreter) {

View file

@ -2015,21 +2015,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
} }
} }
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { void TemplateTable::resolve_cache_and_index(int byte_no,
assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); Register result,
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic); Register Rcache,
Register index,
size_t index_size) {
const Register temp = rbx; const Register temp = rbx;
assert_different_registers(Rcache, index, temp); assert_different_registers(result, Rcache, index, temp);
const int shift_count = (1 + byte_no) * BitsPerByte;
Label resolved; Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (is_invokedynamic) { if (byte_no == f1_oop) {
// we are resolved if the f1 field contains a non-null CallSite object // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
__ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD); // This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ testptr(result, result);
__ jcc(Assembler::notEqual, resolved); __ jcc(Assembler::notEqual, resolved);
} else { } else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(result == noreg, ""); //else change code for setting result
const int shift_count = (1 + byte_no) * BitsPerByte;
__ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count); __ shrl(temp, shift_count);
// have we resolved this bytecode? // have we resolved this bytecode?
@ -2064,7 +2071,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ call_VM(noreg, entry, temp); __ call_VM(noreg, entry, temp);
// Update registers with resolved info // Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
if (result != noreg)
__ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved); __ bind(resolved);
} }
@ -2100,7 +2109,8 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
Register itable_index, Register itable_index,
Register flags, Register flags,
bool is_invokevirtual, bool is_invokevirtual,
bool is_invokevfinal /*unused*/) { bool is_invokevfinal, /*unused*/
bool is_invokedynamic) {
// setup registers // setup registers
const Register cache = rcx; const Register cache = rcx;
const Register index = rdx; const Register index = rdx;
@ -2120,15 +2130,18 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset()); ConstantPoolCacheEntry::f2_offset());
resolve_cache_and_index(byte_no, cache, index); if (byte_no == f1_oop) {
// Resolved f1_oop goes directly into 'method' register.
assert(wordSize == 8, "adjust code below"); assert(is_invokedynamic, "");
__ movptr(method, Address(cache, index, Address::times_8, method_offset)); resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
if (itable_index != noreg) { } else {
__ movptr(itable_index, resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
Address(cache, index, Address::times_8, index_offset)); __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
} }
__ movl(flags , Address(cache, index, Address::times_8, flags_offset)); if (itable_index != noreg) {
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
} }
@ -2187,7 +2200,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register flags = rax; const Register flags = rax;
const Register bc = c_rarg3; // uses same reg as obj, so don't mix them const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
resolve_cache_and_index(byte_no, cache, index); resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false); jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2390,7 +2403,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register flags = rax; const Register flags = rax;
const Register bc = c_rarg3; const Register bc = c_rarg3;
resolve_cache_and_index(byte_no, cache, index); resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static); jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@ -2815,10 +2828,11 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// save 'interpreter return address' // save 'interpreter return address'
__ save_bcp(); __ save_bcp();
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual); load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// load receiver if needed (note: no return address pushed yet) // load receiver if needed (note: no return address pushed yet)
if (load_receiver) { if (load_receiver) {
assert(!is_invokedynamic, "");
__ movl(recv, flags); __ movl(recv, flags);
__ andl(recv, 0xFF); __ andl(recv, 0xFF);
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)); Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
@ -2914,6 +2928,7 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) { void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// rbx: index // rbx: index
@ -2926,6 +2941,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) { void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
@ -2936,6 +2952,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) { void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rbx, noreg, byte_no); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
@ -2945,11 +2962,13 @@ void TemplateTable::invokestatic(int byte_no) {
void TemplateTable::fast_invokevfinal(int byte_no) { void TemplateTable::fast_invokevfinal(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f2_byte, "use this argument");
__ stop("fast_invokevfinal not used on amd64"); __ stop("fast_invokevfinal not used on amd64");
} }
void TemplateTable::invokeinterface(int byte_no) { void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
prepare_invoke(rax, rbx, byte_no); prepare_invoke(rax, rbx, byte_no);
// rax: Interface // rax: Interface
@ -3027,6 +3046,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) { void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
assert(byte_no == f1_oop, "use this argument");
if (!EnableInvokeDynamic) { if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic. // We should not encounter this bytecode if !EnableInvokeDynamic.
@ -3039,6 +3059,7 @@ void TemplateTable::invokedynamic(int byte_no) {
return; return;
} }
assert(byte_no == f1_oop, "use this argument");
prepare_invoke(rax, rbx, byte_no); prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1) // rax: CallSite object (f1)

View file

@ -1377,6 +1377,12 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// registers? True for Intel but false for most RISCs // registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true; const bool Matcher::clone_shift_expressions = true;
bool Matcher::narrow_oop_use_complex_address() {
ShouldNotCallThis();
return true;
}
// Is it better to copy float constants, or load them directly from memory? // Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no // Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a // extra registers. Most RISCs will have to materialize an address into a
@ -1841,14 +1847,14 @@ encode %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls. // RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP. // Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp); __ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size()); debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%} %}
enc_class restore_SP %{ enc_class restore_SP %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp); __ movptr(rsp, rbp_mh_SP_save);
%} %}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
@ -13570,7 +13576,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version) // Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and // Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted. // compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{ instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
match(CallStaticJava); match(CallStaticJava);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke()); predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth); effect(USE meth);

View file

@ -1851,29 +1851,24 @@ uint reloc_java_to_interp()
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{ {
if (UseCompressedOops) { if (UseCompressedOops) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes()); st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (Universe::narrow_oop_shift() != 0) { if (Universe::narrow_oop_shift() != 0) {
st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]"); st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
} }
st->print_cr("cmpq rax, rscratch1\t # Inline cache check"); st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
} else { } else {
st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t" st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
"# Inline cache check", oopDesc::klass_offset_in_bytes()); "# Inline cache check");
} }
st->print_cr("\tjne SharedRuntime::_ic_miss_stub"); st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
st->print_cr("\tnop"); st->print_cr("\tnop\t# nops to align entry point");
if (!OptoBreakpoint) {
st->print_cr("\tnop");
}
} }
#endif #endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{ {
MacroAssembler masm(&cbuf); MacroAssembler masm(&cbuf);
#ifdef ASSERT
uint code_size = cbuf.code_size(); uint code_size = cbuf.code_size();
#endif
if (UseCompressedOops) { if (UseCompressedOops) {
masm.load_klass(rscratch1, j_rarg0); masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1); masm.cmpptr(rax, rscratch1);
@ -1884,33 +1879,21 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly /* WARNING these NOPs are critical so that verified entry point is properly
aligned for patching by NativeJump::patch_verified_entry() */ 4 bytes aligned for patching by NativeJump::patch_verified_entry() */
int nops_cnt = 1; int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
if (!OptoBreakpoint) { if (OptoBreakpoint) {
// Leave space for int3 // Leave space for int3
nops_cnt += 1; nops_cnt -= 1;
}
if (UseCompressedOops) {
// ??? divisible by 4 is aligned?
nops_cnt += 1;
} }
nops_cnt &= 0x3; // Do not add nops if code is aligned.
if (nops_cnt > 0)
masm.nop(nops_cnt); masm.nop(nops_cnt);
assert(cbuf.code_size() - code_size == size(ra_),
"checking code size of inline cache node");
} }
uint MachUEPNode::size(PhaseRegAlloc* ra_) const uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{ {
if (UseCompressedOops) { return MachNode::size(ra_); // too many variables; just compute it
if (Universe::narrow_oop_shift() == 0) { // the hard way
return OptoBreakpoint ? 15 : 16;
} else {
return OptoBreakpoint ? 19 : 20;
}
} else {
return OptoBreakpoint ? 11 : 12;
}
} }
@ -2054,6 +2037,11 @@ const int Matcher::init_array_short_size = 8 * BytesPerLong;
// into registers? True for Intel but false for most RISCs // into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true; const bool Matcher::clone_shift_expressions = true;
bool Matcher::narrow_oop_use_complex_address() {
assert(UseCompressedOops, "only for compressed oops code");
return (LogMinObjAlignmentInBytes <= 3);
}
// Is it better to copy float constants, or load them directly from // Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address, // memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize // requiring no extra registers. Most RISCs will have to materialize
@ -2635,14 +2623,14 @@ encode %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
// RBP is preserved across all calls, even compiled calls. // RBP is preserved across all calls, even compiled calls.
// Use it to preserve RSP in places where the callee might change the SP. // Use it to preserve RSP in places where the callee might change the SP.
__ movptr(rbp, rsp); __ movptr(rbp_mh_SP_save, rsp);
debug_only(int off1 = cbuf.code_size()); debug_only(int off1 = cbuf.code_size());
assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
%} %}
enc_class restore_SP %{ enc_class restore_SP %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
__ movptr(rsp, rbp); __ movptr(rsp, rbp_mh_SP_save);
%} %}
enc_class Java_Static_Call(method meth) enc_class Java_Static_Call(method meth)
@ -5127,7 +5115,7 @@ operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
// Note: x86 architecture doesn't support "scale * index + offset" without a base // Note: x86 architecture doesn't support "scale * index + offset" without a base
// we can't free r12 even with Universe::narrow_oop_base() == NULL. // we can't free r12 even with Universe::narrow_oop_base() == NULL.
operand indCompressedOopOffset(rRegN reg, immL32 off) %{ operand indCompressedOopOffset(rRegN reg, immL32 off) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0)); predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
constraint(ALLOC_IN_RC(ptr_reg)); constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) off); match(AddP (DecodeN reg) off);
@ -7742,10 +7730,11 @@ instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
ins_pipe(ialu_reg_long); ins_pipe(ialu_reg_long);
%} %}
instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{ instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull || predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant); n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
match(Set dst (DecodeN src)); match(Set dst (DecodeN src));
effect(KILL cr);
format %{ "decode_heap_oop_not_null $dst,$src" %} format %{ "decode_heap_oop_not_null $dst,$src" %}
ins_encode %{ ins_encode %{
Register s = $src$$Register; Register s = $src$$Register;
@ -12604,7 +12593,7 @@ instruct CallStaticJavaDirect(method meth) %{
// Call Java Static Instruction (method handle version) // Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and // Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted. // compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{ instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
match(CallStaticJava); match(CallStaticJava);
predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke()); predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth); effect(USE meth);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -232,12 +232,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _header_size); GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _instructions_offset); GEN_OFFS(CodeBlob, _instructions_offset);
GEN_OFFS(CodeBlob, _data_offset); GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _oops_offset);
GEN_OFFS(CodeBlob, _oops_length);
GEN_OFFS(CodeBlob, _frame_size); GEN_OFFS(CodeBlob, _frame_size);
printf("\n"); printf("\n");
GEN_OFFS(nmethod, _method); GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _oops_offset);
GEN_OFFS(nmethod, _scopes_data_offset); GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_pcs_offset); GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset); GEN_OFFS(nmethod, _handler_table_offset);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -130,7 +130,7 @@ typedef struct Nmethod_t {
int32_t scopes_data_beg; /* _scopes_data_offset */ int32_t scopes_data_beg; /* _scopes_data_offset */
int32_t scopes_data_end; int32_t scopes_data_end;
int32_t oops_beg; /* _oops_offset */ int32_t oops_beg; /* _oops_offset */
int32_t oops_len; /* _oops_length */ int32_t oops_end;
int32_t scopes_pcs_beg; /* _scopes_pcs_offset */ int32_t scopes_pcs_beg; /* _scopes_pcs_offset */
int32_t scopes_pcs_end; int32_t scopes_pcs_end;
@ -597,9 +597,9 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err); CHECK_FAIL(err);
/* Oops */ /* Oops */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_offset, &N->oops_beg, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_oops_offset, &N->oops_beg, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_oops_length, &N->oops_len, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->oops_end, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
/* scopes_pcs */ /* scopes_pcs */
@ -624,8 +624,8 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n", fprintf(stderr, "\t nmethod_info: orig_pc_offset: %#x \n",
N->orig_pc_offset); N->orig_pc_offset);
fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_len: %#x\n", fprintf(stderr, "\t nmethod_info: oops_beg: %#x, oops_end: %#x\n",
N->oops_beg, N->oops_len); N->oops_beg, N->oops_end);
fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n", fprintf(stderr, "\t nmethod_info: scopes_data_beg: %#x, scopes_data_end: %#x\n",
N->scopes_data_beg, N->scopes_data_end); N->scopes_data_beg, N->scopes_data_end);
@ -959,8 +959,8 @@ static int scopeDesc_chain(Nmethod_t *N) {
err = scope_desc_at(N, decode_offset, vf); err = scope_desc_at(N, decode_offset, vf);
CHECK_FAIL(err); CHECK_FAIL(err);
if (vf->methodIdx > N->oops_len) { if (vf->methodIdx > ((N->oops_end - N->oops_beg) / POINTER_SIZE)) {
fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n"); fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops length) !\n");
return -1; return -1;
} }
err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE, err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,

View file

@ -510,9 +510,9 @@ class CodeBuffer: public StackObj {
copy_relocations_to(blob); copy_relocations_to(blob);
copy_code_to(blob); copy_code_to(blob);
} }
void copy_oops_to(CodeBlob* blob) { void copy_oops_to(nmethod* nm) {
if (!oop_recorder()->is_unused()) { if (!oop_recorder()->is_unused()) {
oop_recorder()->copy_to(blob); oop_recorder()->copy_to(nm);
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -150,6 +150,9 @@ class FrameMap : public CompilationResourceObj {
// Opr representing the stack_pointer on this platform // Opr representing the stack_pointer on this platform
static LIR_Opr stack_pointer(); static LIR_Opr stack_pointer();
// JSR 292
static LIR_Opr method_handle_invoke_SP_save_opr();
static BasicTypeArray* signature_type_array_for(const ciMethod* method); static BasicTypeArray* signature_type_array_for(const ciMethod* method);
static BasicTypeArray* signature_type_array_for(const char * signature); static BasicTypeArray* signature_type_array_for(const char * signature);

View file

@ -2438,13 +2438,13 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokedynamic : // fall through case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface: invoke(code); break; case Bytecodes::_invokeinterface: invoke(code); break;
case Bytecodes::_new : new_instance(s.get_index_big()); break; case Bytecodes::_new : new_instance(s.get_index_u2()); break;
case Bytecodes::_newarray : new_type_array(); break; case Bytecodes::_newarray : new_type_array(); break;
case Bytecodes::_anewarray : new_object_array(); break; case Bytecodes::_anewarray : new_object_array(); break;
case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break; case Bytecodes::_arraylength : ipush(append(new ArrayLength(apop(), lock_stack()))); break;
case Bytecodes::_athrow : throw_op(s.cur_bci()); break; case Bytecodes::_athrow : throw_op(s.cur_bci()); break;
case Bytecodes::_checkcast : check_cast(s.get_index_big()); break; case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break;
case Bytecodes::_instanceof : instance_of(s.get_index_big()); break; case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break;
// Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException // Note: we do not have special handling for the monitorenter bytecode if DeoptC1 && DeoptOnAsyncException
case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break; case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break;
case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break;

View file

@ -230,7 +230,8 @@ CodeEmitInfo::CodeEmitInfo(int bci, ValueStack* stack, XHandlers* exception_hand
, _stack(stack) , _stack(stack)
, _exception_handlers(exception_handlers) , _exception_handlers(exception_handlers)
, _next(NULL) , _next(NULL)
, _id(-1) { , _id(-1)
, _is_method_handle_invoke(false) {
assert(_stack != NULL, "must be non null"); assert(_stack != NULL, "must be non null");
assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode"); assert(_bci == SynchronizationEntryBCI || Bytecodes::is_defined(scope()->method()->java_code_at_bci(_bci)), "make sure bci points at a real bytecode");
} }
@ -241,7 +242,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
, _exception_handlers(NULL) , _exception_handlers(NULL)
, _bci(info->_bci) , _bci(info->_bci)
, _scope_debug_info(NULL) , _scope_debug_info(NULL)
, _oop_map(NULL) { , _oop_map(NULL)
, _is_method_handle_invoke(info->_is_method_handle_invoke) {
if (lock_stack_only) { if (lock_stack_only) {
if (info->_stack != NULL) { if (info->_stack != NULL) {
_stack = info->_stack->copy_locks(); _stack = info->_stack->copy_locks();
@ -259,10 +261,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
} }
void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) { void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
// record the safepoint before recording the debug info for enclosing scopes // record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy()); recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
_scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke); _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
recorder->end_safepoint(pc_offset); recorder->end_safepoint(pc_offset);
} }

View file

@ -269,6 +269,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int _bci; int _bci;
CodeEmitInfo* _next; CodeEmitInfo* _next;
int _id; int _id;
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); } FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); } Compilation* compilation() const { return scope()->compilation(); }
@ -287,7 +288,8 @@ class CodeEmitInfo: public CompilationResourceObj {
, _stack(NULL) , _stack(NULL)
, _exception_handlers(NULL) , _exception_handlers(NULL)
, _next(NULL) , _next(NULL)
, _id(-1) { , _id(-1)
, _is_method_handle_invoke(false) {
} }
// make a copy // make a copy
@ -302,13 +304,16 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; } int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr); void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false); void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
CodeEmitInfo* next() const { return _next; } CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; } void set_next(CodeEmitInfo* next) { _next = next; }
int id() const { return _id; } int id() const { return _id; }
void set_id(int id) { _id = id; } void set_id(int id) { _id = id; }
bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
}; };

View file

@ -715,7 +715,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
} }
if (opJavaCall->_info) do_info(opJavaCall->_info); if (opJavaCall->_info) do_info(opJavaCall->_info);
if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr()); if (opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
do_call(); do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result); if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);

View file

@ -505,15 +505,22 @@ class LIR_Address: public LIR_OprPtr {
, _type(type) , _type(type)
, _disp(0) { verify(); } , _disp(0) { verify(); }
LIR_Address(LIR_Opr base, int disp, BasicType type): LIR_Address(LIR_Opr base, intx disp, BasicType type):
_base(base) _base(base)
, _index(LIR_OprDesc::illegalOpr()) , _index(LIR_OprDesc::illegalOpr())
, _scale(times_1) , _scale(times_1)
, _type(type) , _type(type)
, _disp(disp) { verify(); } , _disp(disp) { verify(); }
LIR_Address(LIR_Opr base, BasicType type):
_base(base)
, _index(LIR_OprDesc::illegalOpr())
, _scale(times_1)
, _type(type)
, _disp(0) { verify(); }
#ifdef X86 #ifdef X86
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type): LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base) _base(base)
, _index(index) , _index(index)
, _scale(scale) , _scale(scale)
@ -1035,6 +1042,7 @@ class LIR_OpJavaCall: public LIR_OpCall {
private: private:
ciMethod* _method; ciMethod* _method;
LIR_Opr _receiver; LIR_Opr _receiver;
LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.
public: public:
LIR_OpJavaCall(LIR_Code code, ciMethod* method, LIR_OpJavaCall(LIR_Code code, ciMethod* method,
@ -1043,14 +1051,18 @@ class LIR_OpJavaCall: public LIR_OpCall {
CodeEmitInfo* info) CodeEmitInfo* info)
: LIR_OpCall(code, addr, result, arguments, info) : LIR_OpCall(code, addr, result, arguments, info)
, _receiver(receiver) , _receiver(receiver)
, _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); } , _method(method)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_OpJavaCall(LIR_Code code, ciMethod* method, LIR_OpJavaCall(LIR_Code code, ciMethod* method,
LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset, LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
LIR_OprList* arguments, CodeEmitInfo* info) LIR_OprList* arguments, CodeEmitInfo* info)
: LIR_OpCall(code, (address)vtable_offset, result, arguments, info) : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
, _receiver(receiver) , _receiver(receiver)
, _method(method) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); } , _method(method)
, _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
{ assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
LIR_Opr receiver() const { return _receiver; } LIR_Opr receiver() const { return _receiver; }
ciMethod* method() const { return _method; } ciMethod* method() const { return _method; }

View file

@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
} }
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) { void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
flush_debug_info(pc_offset); flush_debug_info(pc_offset);
cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke); cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
if (cinfo->exception_handlers() != NULL) { if (cinfo->exception_handlers() != NULL) {
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers()); compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
} }
@ -413,12 +413,6 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) { void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info()); verify_oop_map(op->info());
// JSR 292
// Preserve the SP over MethodHandle call sites.
if (op->is_method_handle_invoke()) {
preserve_SP(op);
}
if (os::is_MP()) { if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware // must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code()); align_call(op->code());
@ -444,10 +438,6 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
if (op->is_method_handle_invoke()) {
restore_SP(op);
}
#if defined(X86) && defined(TIERED) #if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it // C2 leave fpu stack dirty clean it
if (UseSSE < 2) { if (UseSSE < 2) {

View file

@ -84,7 +84,7 @@ class LIR_Assembler: public CompilationResourceObj {
Address as_Address_hi(LIR_Address* addr); Address as_Address_hi(LIR_Address* addr);
// debug information // debug information
void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false); void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_branch(CodeEmitInfo* info); void add_debug_info_for_branch(CodeEmitInfo* info);
void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo); void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_div0_here(CodeEmitInfo* info); void add_debug_info_for_div0_here(CodeEmitInfo* info);
@ -212,10 +212,6 @@ class LIR_Assembler: public CompilationResourceObj {
void ic_call( LIR_OpJavaCall* op); void ic_call( LIR_OpJavaCall* op);
void vtable_call( LIR_OpJavaCall* op); void vtable_call( LIR_OpJavaCall* op);
// JSR 292
void preserve_SP(LIR_OpJavaCall* op);
void restore_SP( LIR_OpJavaCall* op);
void osr_entry(); void osr_entry();
void build_frame(); void build_frame();

View file

@ -1309,7 +1309,7 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
if (!addr_opr->is_address()) { if (!addr_opr->is_address()) {
assert(addr_opr->is_register(), "must be"); assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT)); addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
} }
CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
info); info);
@ -1325,7 +1325,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
new_val->as_constant_ptr()->as_jobject() == NULL) return; new_val->as_constant_ptr()->as_jobject() == NULL) return;
if (!new_val->is_register()) { if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_pointer_register(); LIR_Opr new_val_reg = new_register(T_OBJECT);
if (new_val->is_constant()) { if (new_val->is_constant()) {
__ move(new_val, new_val_reg); __ move(new_val, new_val_reg);
} else { } else {
@ -1337,7 +1337,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
if (addr->is_address()) { if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr(); LIR_Address* address = addr->as_address_ptr();
LIR_Opr ptr = new_pointer_register(); LIR_Opr ptr = new_register(T_OBJECT);
if (!address->index()->is_valid() && address->disp() == 0) { if (!address->index()->is_valid() && address->disp() == 0) {
__ move(address->base(), ptr); __ move(address->base(), ptr);
} else { } else {
@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
LIR_Opr xor_res = new_pointer_register(); LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register(); LIR_Opr xor_shift_res = new_pointer_register();
if (TwoOperandLIRForm ) { if (TwoOperandLIRForm ) {
__ move(addr, xor_res); __ move(addr, xor_res);
__ logical_xor(xor_res, new_val, xor_res); __ logical_xor(xor_res, new_val, xor_res);
@ -1368,7 +1367,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
} }
if (!new_val->is_register()) { if (!new_val->is_register()) {
LIR_Opr new_val_reg = new_pointer_register(); LIR_Opr new_val_reg = new_register(T_OBJECT);
__ leal(new_val, new_val_reg); __ leal(new_val, new_val_reg);
new_val = new_val_reg; new_val = new_val_reg;
} }
@ -1377,7 +1376,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD)); __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val); CodeStub* slow = new G1PostBarrierStub(addr, new_val);
__ branch(lir_cond_notEqual, T_INT, slow); __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
__ branch_destination(slow->continuation()); __ branch_destination(slow->continuation());
} }
@ -2371,9 +2370,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
bool optimized = x->target_is_loaded() && x->target_is_final(); bool optimized = x->target_is_loaded() && x->target_is_final();
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match"); assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
// JSR 292
// Preserve the SP over MethodHandle call sites.
ciMethod* target = x->target();
if (target->is_method_handle_invoke()) {
info->set_is_method_handle_invoke(true);
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
}
switch (x->code()) { switch (x->code()) {
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
__ call_static(x->target(), result_register, __ call_static(target, result_register,
SharedRuntime::get_resolve_static_call_stub(), SharedRuntime::get_resolve_static_call_stub(),
arg_list, info); arg_list, info);
break; break;
@ -2383,17 +2390,17 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// for final target we still produce an inline cache, in order // for final target we still produce an inline cache, in order
// to be able to call mixed mode // to be able to call mixed mode
if (x->code() == Bytecodes::_invokespecial || optimized) { if (x->code() == Bytecodes::_invokespecial || optimized) {
__ call_opt_virtual(x->target(), receiver, result_register, __ call_opt_virtual(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(), SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info); arg_list, info);
} else if (x->vtable_index() < 0) { } else if (x->vtable_index() < 0) {
__ call_icvirtual(x->target(), receiver, result_register, __ call_icvirtual(target, receiver, result_register,
SharedRuntime::get_resolve_virtual_call_stub(), SharedRuntime::get_resolve_virtual_call_stub(),
arg_list, info); arg_list, info);
} else { } else {
int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size(); int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes(); int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
__ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info); __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
} }
break; break;
case Bytecodes::_invokedynamic: { case Bytecodes::_invokedynamic: {
@ -2432,7 +2439,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// Load target MethodHandle from CallSite object. // Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver); __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ call_dynamic(x->target(), receiver, result_register, __ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(), SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info); arg_list, info);
break; break;
@ -2442,6 +2449,12 @@ void LIRGenerator::do_Invoke(Invoke* x) {
break; break;
} }
// JSR 292
// Restore the SP after MethodHandle call sites.
if (target->is_method_handle_invoke()) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}
if (x->type()->is_float() || x->type()->is_double()) { if (x->type()->is_float() || x->type()->is_double()) {
// Force rounding of results from non-strictfp when in strictfp // Force rounding of results from non-strictfp when in strictfp
// scope (or when we don't know the strictness of the callee, to // scope (or when we don't know the strictness of the callee, to

View file

@ -690,20 +690,32 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// invokedynamic support // invokedynamic support
// ------------------------------------------------------------------
// ciMethod::is_method_handle_invoke
// //
// Return true if the method is a MethodHandle target.
bool ciMethod::is_method_handle_invoke() const { bool ciMethod::is_method_handle_invoke() const {
check_is_loaded(); bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS); methodOopDesc::is_method_handle_invoke_name(name()->sid()));
#ifdef ASSERT #ifdef ASSERT
if (is_loaded()) {
bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
{ {
VM_ENTRY_MARK; VM_ENTRY_MARK;
bool flag2 = get_methodOop()->is_method_handle_invoke(); bool flag3 = get_methodOop()->is_method_handle_invoke();
assert(flag == flag2, "consistent"); assert(flag2 == flag3, "consistent");
assert(flag == flag3, "consistent");
}
} }
#endif //ASSERT #endif //ASSERT
return flag; return flag;
} }
// ------------------------------------------------------------------
// ciMethod::is_method_handle_adapter
//
// Return true if the method is a generated MethodHandle adapter.
bool ciMethod::is_method_handle_adapter() const { bool ciMethod::is_method_handle_adapter() const {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -81,27 +81,21 @@ int ciExceptionHandlerStream::count_remaining() {
// providing accessors for constant pool items. // providing accessors for constant pool items.
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciBytecodeStream::wide // ciBytecodeStream::next_wide_or_table
//
// Special handling for the wide bytcode
Bytecodes::Code ciBytecodeStream::wide()
{
// Get following bytecode; do not return wide
Bytecodes::Code bc = (Bytecodes::Code)_pc[1];
_pc += 2; // Skip both bytecodes
_pc += 2; // Skip index always
if( bc == Bytecodes::_iinc )
_pc += 2; // Skip optional constant
_was_wide = _pc; // Flag last wide bytecode found
return bc;
}
// ------------------------------------------------------------------
// ciBytecodeStream::table
// //
// Special handling for switch ops // Special handling for switch ops
Bytecodes::Code ciBytecodeStream::table( Bytecodes::Code bc ) { Bytecodes::Code ciBytecodeStream::next_wide_or_table(Bytecodes::Code bc) {
switch (bc) { // Check for special bytecode handling switch (bc) { // Check for special bytecode handling
case Bytecodes::_wide:
// Special handling for the wide bytcode
// Get following bytecode; do not return wide
assert(Bytecodes::Code(_pc[0]) == Bytecodes::_wide, "");
bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)_pc[1]);
assert(Bytecodes::wide_length_for(bc) > 2, "must make progress");
_pc += Bytecodes::wide_length_for(bc);
_was_wide = _pc; // Flag last wide bytecode found
assert(is_wide(), "accessor works right");
break;
case Bytecodes::_lookupswitch: case Bytecodes::_lookupswitch:
_pc++; // Skip wide bytecode _pc++; // Skip wide bytecode
@ -164,7 +158,7 @@ void ciBytecodeStream::force_bci(int bci) {
int ciBytecodeStream::get_klass_index() const { int ciBytecodeStream::get_klass_index() const {
switch(cur_bc()) { switch(cur_bc()) {
case Bytecodes::_ldc: case Bytecodes::_ldc:
return get_index(); return get_index_u1();
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w: case Bytecodes::_ldc2_w:
case Bytecodes::_checkcast: case Bytecodes::_checkcast:
@ -173,7 +167,7 @@ int ciBytecodeStream::get_klass_index() const {
case Bytecodes::_multianewarray: case Bytecodes::_multianewarray:
case Bytecodes::_new: case Bytecodes::_new:
case Bytecodes::_newarray: case Bytecodes::_newarray:
return get_index_big(); return get_index_u2();
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
return 0; return 0;
@ -199,10 +193,10 @@ ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
int ciBytecodeStream::get_constant_index() const { int ciBytecodeStream::get_constant_index() const {
switch(cur_bc()) { switch(cur_bc()) {
case Bytecodes::_ldc: case Bytecodes::_ldc:
return get_index(); return get_index_u1();
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w: case Bytecodes::_ldc2_w:
return get_index_big(); return get_index_u2();
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
return 0; return 0;
@ -239,7 +233,7 @@ int ciBytecodeStream::get_field_index() {
cur_bc() == Bytecodes::_putfield || cur_bc() == Bytecodes::_putfield ||
cur_bc() == Bytecodes::_getstatic || cur_bc() == Bytecodes::_getstatic ||
cur_bc() == Bytecodes::_putstatic, "wrong bc"); cur_bc() == Bytecodes::_putstatic, "wrong bc");
return get_index_big(); return get_index_u2_cpcache();
} }
@ -319,7 +313,9 @@ int ciBytecodeStream::get_method_index() {
ShouldNotReachHere(); ShouldNotReachHere();
} }
#endif #endif
return get_index_int(); if (has_index_u4())
return get_index_u4(); // invokedynamic
return get_index_u2_cpcache();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,14 +32,18 @@
class ciBytecodeStream : StackObj { class ciBytecodeStream : StackObj {
private: private:
// Handling for the weird bytecodes // Handling for the weird bytecodes
Bytecodes::Code wide(); // Handle wide bytecode Bytecodes::Code next_wide_or_table(Bytecodes::Code); // Handle _wide & complicated inline table
Bytecodes::Code table(Bytecodes::Code); // Handle complicated inline table
static Bytecodes::Code check_java(Bytecodes::Code c) { static Bytecodes::Code check_java(Bytecodes::Code c) {
assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes"); assert(Bytecodes::is_java_code(c), "should not return _fast bytecodes");
return c; return c;
} }
static Bytecodes::Code check_defined(Bytecodes::Code c) {
assert(Bytecodes::is_defined(c), "");
return c;
}
ciMethod* _method; // the method ciMethod* _method; // the method
ciInstanceKlass* _holder; ciInstanceKlass* _holder;
address _bc_start; // Start of current bytecode for table address _bc_start; // Start of current bytecode for table
@ -50,11 +54,21 @@ private:
address _end; // Past end of bytecodes address _end; // Past end of bytecodes
address _pc; // Current PC address _pc; // Current PC
Bytecodes::Code _bc; // Current bytecode Bytecodes::Code _bc; // Current bytecode
Bytecodes::Code _raw_bc; // Current bytecode, raw form
void reset( address base, unsigned int size ) { void reset( address base, unsigned int size ) {
_bc_start =_was_wide = 0; _bc_start =_was_wide = 0;
_start = _pc = base; _end = base + size; } _start = _pc = base; _end = base + size; }
void assert_wide(bool require_wide) const {
if (require_wide)
{ assert(is_wide(), "must be a wide instruction"); }
else { assert(!is_wide(), "must not be a wide instruction"); }
}
Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
public: public:
// End-Of-Bytecodes // End-Of-Bytecodes
static Bytecodes::Code EOBC() { static Bytecodes::Code EOBC() {
@ -97,6 +111,7 @@ public:
int instruction_size() const { return _pc - _bc_start; } int instruction_size() const { return _pc - _bc_start; }
Bytecodes::Code cur_bc() const{ return check_java(_bc); } Bytecodes::Code cur_bc() const{ return check_java(_bc); }
Bytecodes::Code cur_bc_raw() const { return check_defined(_raw_bc); }
Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); } Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
// Return current ByteCode and increment PC to next bytecode, skipping all // Return current ByteCode and increment PC to next bytecode, skipping all
@ -109,85 +124,76 @@ public:
// Fetch Java bytecode // Fetch Java bytecode
// All rewritten bytecodes maintain the size of original bytecode. // All rewritten bytecodes maintain the size of original bytecode.
_bc = Bytecodes::java_code((Bytecodes::Code)*_pc); _bc = Bytecodes::java_code(_raw_bc = (Bytecodes::Code)*_pc);
int csize = Bytecodes::length_for(_bc); // Expected size int csize = Bytecodes::length_for(_bc); // Expected size
if( _bc == Bytecodes::_wide ) {
_bc=wide(); // Handle wide bytecode
} else if( csize == 0 ) {
_bc=table(_bc); // Handle inline tables
} else {
_pc += csize; // Bump PC past bytecode _pc += csize; // Bump PC past bytecode
if (csize == 0) {
_bc = next_wide_or_table(_bc);
} }
return check_java(_bc); return check_java(_bc);
} }
bool is_wide() const { return ( _pc == _was_wide ); } bool is_wide() const { return ( _pc == _was_wide ); }
// Does this instruction contain an index which refes into the CP cache?
bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
int get_index_u1() const {
return bytecode()->get_index_u1(cur_bc_raw());
}
// Get a byte index following this bytecode. // Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index. // If prefixed with a wide bytecode, get a wide index.
int get_index() const { int get_index() const {
assert_index_size(is_wide() ? 2 : 1);
return (_pc == _was_wide) // was widened? return (_pc == _was_wide) // was widened?
? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index ? get_index_u2(true) // yes, return wide index
: _bc_start[1]; // no, return narrow index : get_index_u1(); // no, return narrow index
} }
// Get 2-byte index (getfield/putstatic/etc) // Get 2-byte index (byte swapping depending on which bytecode)
int get_index_big() const { int get_index_u2(bool is_wide = false) const {
assert_index_size(2); return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
return Bytes::get_Java_u2(_bc_start+1);
} }
// Get 2-byte index (or 4-byte, for invokedynamic) // Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
int get_index_int() const { int get_index_u2_cpcache() const {
return has_giant_index() ? get_index_giant() : get_index_big(); return bytecode()->get_index_u2_cpcache(cur_bc_raw());
} }
// Get 4-byte index, for invokedynamic. // Get 4-byte index, for invokedynamic.
int get_index_giant() const { int get_index_u4() const {
assert_index_size(4); return bytecode()->get_index_u4(cur_bc_raw());
return Bytes::get_native_u4(_bc_start+1);
} }
bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); } bool has_index_u4() const {
return bytecode()->has_index_u4(cur_bc_raw());
}
// Get dimensions byte (multinewarray) // Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); } int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Sign-extended index byte/short, no widening // Sign-extended index byte/short, no widening
int get_byte() const { return (int8_t)(_pc[-1]); } int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); } int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
int get_long() const { return (int32_t)Bytes::get_Java_u4(_pc-4); }
// Get a byte signed constant for "iinc". Invalid for other bytecodes. // Get a byte signed constant for "iinc". Invalid for other bytecodes.
// If prefixed with a wide bytecode, get a wide constant // If prefixed with a wide bytecode, get a wide constant
int get_iinc_con() const {return (_pc==_was_wide) ? get_short() :get_byte();} int get_iinc_con() const {return (_pc==_was_wide) ? (jshort) get_constant_u2(true) : (jbyte) get_constant_u1();}
// 2-byte branch offset from current pc // 2-byte branch offset from current pc
int get_dest() const { int get_dest() const {
assert( Bytecodes::length_at(_bc_start) == sizeof(jshort)+1, "get_dest called with bad bytecode" ); return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
return _bc_start-_start + (short)Bytes::get_Java_u2(_pc-2);
} }
// 2-byte branch offset from next pc // 2-byte branch offset from next pc
int next_get_dest() const { int next_get_dest() const {
address next_bc_start = _pc;
assert(_pc < _end, ""); assert(_pc < _end, "");
Bytecodes::Code next_bc = (Bytecodes::Code)*_pc; return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
assert( next_bc != Bytecodes::_wide, "");
int next_csize = Bytecodes::length_for(next_bc);
assert( next_csize != 0, "" );
assert( next_bc <= Bytecodes::_jsr_w, "");
address next_pc = _pc + next_csize;
assert( Bytecodes::length_at(next_bc_start) == sizeof(jshort)+1, "next_get_dest called with bad bytecode" );
return next_bc_start-_start + (short)Bytes::get_Java_u2(next_pc-2);
} }
// 4-byte branch offset from current pc // 4-byte branch offset from current pc
int get_far_dest() const { int get_far_dest() const {
assert( Bytecodes::length_at(_bc_start) == sizeof(jint)+1, "dest4 called with bad bytecode" ); return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
return _bc_start-_start + (int)Bytes::get_Java_u4(_pc-4);
} }
// For a lookup or switch table, return target destination // For a lookup or switch table, return target destination
@ -234,22 +240,6 @@ public:
ciCPCache* get_cpcache(); ciCPCache* get_cpcache();
ciCallSite* get_call_site(); ciCallSite* get_call_site();
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
if (isize == 2 && cur_bc() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (has_giant_index())
isize = 4;
else
isize = 2;
assert(isize = required_size, "wrong index size");
#endif
}
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2132,6 +2132,7 @@ bool ciTypeFlow::can_trap(ciBytecodeStream& str) {
if (!Bytecodes::can_trap(str.cur_bc())) return false; if (!Bytecodes::can_trap(str.cur_bc())) return false;
switch (str.cur_bc()) { switch (str.cur_bc()) {
// %%% FIXME: ldc of Class can generate an exception
case Bytecodes::_ldc: case Bytecodes::_ldc:
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w: case Bytecodes::_ldc2_w:

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -257,6 +257,9 @@ void ClassVerifier::verify_class(TRAPS) {
int num_methods = methods->length(); int num_methods = methods->length();
for (int index = 0; index < num_methods; index++) { for (int index = 0; index < num_methods; index++) {
// Check for recursive re-verification before each method.
if (was_recursively_verified()) return;
methodOop m = (methodOop)methods->obj_at(index); methodOop m = (methodOop)methods->obj_at(index);
if (m->is_native() || m->is_abstract()) { if (m->is_native() || m->is_abstract()) {
// If m is native or abstract, skip it. It is checked in class file // If m is native or abstract, skip it. It is checked in class file
@ -265,6 +268,12 @@ void ClassVerifier::verify_class(TRAPS) {
} }
verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this)); verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
} }
if (_verify_verbose || TraceClassInitialization) {
if (was_recursively_verified())
tty->print_cr("Recursive verification detected for: %s",
_klass->external_name());
}
} }
void ClassVerifier::verify_method(methodHandle m, TRAPS) { void ClassVerifier::verify_method(methodHandle m, TRAPS) {
@ -329,6 +338,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
// instruction in sequence // instruction in sequence
Bytecodes::Code opcode; Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) { while (!bcs.is_last_bytecode()) {
// Check for recursive re-verification before each bytecode.
if (was_recursively_verified()) return;
opcode = bcs.raw_next(); opcode = bcs.raw_next();
u2 bci = bcs.bci(); u2 bci = bcs.bci();
@ -413,13 +425,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_ldc : case Bytecodes::_ldc :
verify_ldc( verify_ldc(
opcode, bcs.get_index(), &current_frame, opcode, bcs.get_index_u1(), &current_frame,
cp, bci, CHECK_VERIFY(this)); cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_ldc_w : case Bytecodes::_ldc_w :
case Bytecodes::_ldc2_w : case Bytecodes::_ldc2_w :
verify_ldc( verify_ldc(
opcode, bcs.get_index_big(), &current_frame, opcode, bcs.get_index_u2(), &current_frame,
cp, bci, CHECK_VERIFY(this)); cp, bci, CHECK_VERIFY(this));
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_iload : case Bytecodes::_iload :
@ -1185,7 +1197,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_new : case Bytecodes::_new :
{ {
index = bcs.get_index_big(); index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this)); verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_class_type = VerificationType new_class_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this)); cp_index_to_type(index, cp, CHECK_VERIFY(this));
@ -1205,7 +1217,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_anewarray : case Bytecodes::_anewarray :
verify_anewarray( verify_anewarray(
bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this)); bcs.get_index_u2(), cp, &current_frame, CHECK_VERIFY(this));
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_arraylength : case Bytecodes::_arraylength :
type = current_frame.pop_stack( type = current_frame.pop_stack(
@ -1218,7 +1230,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_checkcast : case Bytecodes::_checkcast :
{ {
index = bcs.get_index_big(); index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this)); verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack( current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this)); VerificationType::reference_check(), CHECK_VERIFY(this));
@ -1228,7 +1240,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
} }
case Bytecodes::_instanceof : { case Bytecodes::_instanceof : {
index = bcs.get_index_big(); index = bcs.get_index_u2();
verify_cp_class_type(index, cp, CHECK_VERIFY(this)); verify_cp_class_type(index, cp, CHECK_VERIFY(this));
current_frame.pop_stack( current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this)); VerificationType::reference_check(), CHECK_VERIFY(this));
@ -1243,7 +1255,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_multianewarray : case Bytecodes::_multianewarray :
{ {
index = bcs.get_index_big(); index = bcs.get_index_u2();
u2 dim = *(bcs.bcp()+3); u2 dim = *(bcs.bcp()+3);
verify_cp_class_type(index, cp, CHECK_VERIFY(this)); verify_cp_class_type(index, cp, CHECK_VERIFY(this));
VerificationType new_array_type = VerificationType new_array_type =
@ -1302,7 +1314,7 @@ char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
while (!bcs.is_last_bytecode()) { while (!bcs.is_last_bytecode()) {
if (bcs.raw_next() != Bytecodes::_illegal) { if (bcs.raw_next() != Bytecodes::_illegal) {
int bci = bcs.bci(); int bci = bcs.bci();
if (bcs.code() == Bytecodes::_new) { if (bcs.raw_code() == Bytecodes::_new) {
code_data[bci] = NEW_OFFSET; code_data[bci] = NEW_OFFSET;
} else { } else {
code_data[bci] = BYTECODE_OFFSET; code_data[bci] = BYTECODE_OFFSET;
@ -1473,20 +1485,9 @@ void ClassVerifier::verify_cp_type(
// In some situations, bytecode rewriting may occur while we're verifying. // In some situations, bytecode rewriting may occur while we're verifying.
// In this case, a constant pool cache exists and some indices refer to that // In this case, a constant pool cache exists and some indices refer to that
// instead. Get the original index for the tag check // instead. Be sure we don't pick up such indices by accident.
constantPoolCacheOop cache = cp->cache(); // We must check was_recursively_verified() before we get here.
if (cache != NULL && guarantee(cp->cache() == NULL, "not rewritten yet");
((types == (1 << JVM_CONSTANT_InterfaceMethodref)) ||
(types == (1 << JVM_CONSTANT_Methodref)) ||
(types == (1 << JVM_CONSTANT_Fieldref)))) {
int native_index = index;
if (Bytes::is_Java_byte_ordering_different()) {
native_index = Bytes::swap_u2(index);
}
assert((native_index >= 0) && (native_index < cache->length()),
"Must be a legal index into the cp cache");
index = cache->entry_at(native_index)->constant_pool_index();
}
verify_cp_index(cp, index, CHECK_VERIFY(this)); verify_cp_index(cp, index, CHECK_VERIFY(this));
unsigned int tag = cp->tag_at(index).value(); unsigned int tag = cp->tag_at(index).value();
@ -1657,7 +1658,7 @@ void ClassVerifier::verify_switch(
int keys, delta; int keys, delta;
current_frame->pop_stack( current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this)); VerificationType::integer_type(), CHECK_VERIFY(this));
if (bcs->code() == Bytecodes::_tableswitch) { if (bcs->raw_code() == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize); jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize); jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
if (low > high) { if (low > high) {
@ -1713,7 +1714,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
StackMapFrame* current_frame, StackMapFrame* current_frame,
constantPoolHandle cp, constantPoolHandle cp,
TRAPS) { TRAPS) {
u2 index = bcs->get_index_big(); u2 index = bcs->get_index_u2();
verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this)); verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
// Get field name and signature // Get field name and signature
@ -1753,7 +1754,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
&sig_stream, field_type, CHECK_VERIFY(this)); &sig_stream, field_type, CHECK_VERIFY(this));
u2 bci = bcs->bci(); u2 bci = bcs->bci();
bool is_assignable; bool is_assignable;
switch (bcs->code()) { switch (bcs->raw_code()) {
case Bytecodes::_getstatic: { case Bytecodes::_getstatic: {
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
current_frame->push_stack(field_type[i], CHECK_VERIFY(this)); current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
@ -1873,7 +1874,7 @@ void ClassVerifier::verify_invoke_init(
ref_class_type.name(), CHECK_VERIFY(this)); ref_class_type.name(), CHECK_VERIFY(this));
methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method( methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(), vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_big())); cp->signature_ref_at(bcs->get_index_u2()));
instanceKlassHandle mh(THREAD, m->method_holder()); instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) { if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from( bool assignable = current_type().is_assignable_from(
@ -1896,8 +1897,8 @@ void ClassVerifier::verify_invoke_instructions(
bool *this_uninit, VerificationType return_type, bool *this_uninit, VerificationType return_type,
constantPoolHandle cp, TRAPS) { constantPoolHandle cp, TRAPS) {
// Make sure the constant pool item is the right type // Make sure the constant pool item is the right type
u2 index = bcs->get_index_big(); u2 index = bcs->get_index_u2();
Bytecodes::Code opcode = bcs->code(); Bytecodes::Code opcode = bcs->raw_code();
unsigned int types = (opcode == Bytecodes::_invokeinterface unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref ? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic : opcode == Bytecodes::_invokedynamic

View file

@ -158,6 +158,16 @@ class ClassVerifier : public StackObj {
methodHandle _method; // current method being verified methodHandle _method; // current method being verified
VerificationType _this_type; // the verification type of the current class VerificationType _this_type; // the verification type of the current class
// Some recursive calls from the verifier to the name resolver
// can cause the current class to be re-verified and rewritten.
// If this happens, the original verification should not continue,
// because constant pool indexes will have changed.
// The rewriter is preceded by the verifier. If the verifier throws
// an error, rewriting is prevented. Also, rewriting always precedes
// bytecode execution or compilation. Thus, is_rewritten implies
// that a class has been verified and prepared for execution.
bool was_recursively_verified() { return _klass->is_rewritten(); }
public: public:
enum { enum {
BYTECODE_OFFSET = 1, BYTECODE_OFFSET = 1,

View file

@ -66,8 +66,6 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
_relocation_size = locs_size; _relocation_size = locs_size;
_instructions_offset = align_code_offset(header_size + locs_size); _instructions_offset = align_code_offset(header_size + locs_size);
_data_offset = size; _data_offset = size;
_oops_offset = size;
_oops_length = 0;
_frame_size = 0; _frame_size = 0;
set_oop_maps(NULL); set_oop_maps(NULL);
} }
@ -94,9 +92,6 @@ CodeBlob::CodeBlob(
_relocation_size = round_to(cb->total_relocation_size(), oopSize); _relocation_size = round_to(cb->total_relocation_size(), oopSize);
_instructions_offset = align_code_offset(header_size + _relocation_size); _instructions_offset = align_code_offset(header_size + _relocation_size);
_data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize); _data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize);
_oops_offset = _size - round_to(cb->total_oop_size(), oopSize);
_oops_length = 0; // temporary, until the copy_oops handshake
assert(_oops_offset >= _data_offset, "codeBlob is too small");
assert(_data_offset <= size, "codeBlob is too small"); assert(_data_offset <= size, "codeBlob is too small");
cb->copy_code_and_locs_to(this); cb->copy_code_and_locs_to(this);
@ -131,99 +126,6 @@ void CodeBlob::flush() {
} }
// Promote one word from an assembly-time handle to a live embedded oop.
inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
// As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
(*dest) = (oop)handle;
} else {
(*dest) = JNIHandles::resolve_non_null(handle);
}
}
void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
assert(_oops_length == 0, "do this handshake just once, please");
int length = array->length();
assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
_oops_length = length;
// Now we can fix up all the oops in the code.
// We need to do this in the code because
// the assembler uses jobjects as placeholders.
// The code and relocations have already been
// initialized by the CodeBlob constructor,
// so it is valid even at this early point to
// iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
return (relocInfo::relocType) iter.type();
}
// No relocation info found for pc
ShouldNotReachHere();
return relocInfo::none; // dummy return value
}
bool CodeBlob::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool CodeBlob::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void CodeBlob::fix_oop_relocations(address begin, address end,
bool initialize_immediates) {
// re-patch all oop-bearing instructions, just in case some oops moved
RelocIterator iter(this, begin, end);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (initialize_immediates && reloc->oop_is_immediate()) {
oop* dest = reloc->oop_addr();
initialize_immediate_oop(dest, (jobject) *dest);
}
// Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
}
// There must not be any interfering patches or breakpoints.
assert(!(iter.type() == relocInfo::breakpoint_type
&& iter.breakpoint_reloc()->active()),
"no active breakpoint");
}
}
void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) {
ShouldNotReachHere();
}
OopMap* CodeBlob::oop_map_for_return_address(address return_address) { OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
address pc = return_address ; address pc = return_address ;
assert (oop_maps() != NULL, "nope"); assert (oop_maps() != NULL, "nope");

View file

@ -54,17 +54,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// that range. There is a similar range(s) on returns // that range. There is a similar range(s) on returns
// which we don't detect. // which we don't detect.
int _data_offset; // offset to where data region begins int _data_offset; // offset to where data region begins
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _oops_length; // number of embedded oops
int _frame_size; // size of stack frame int _frame_size; // size of stack frame
OopMapSet* _oop_maps; // OopMap for this CodeBlob OopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeComments _comments; CodeComments _comments;
friend class OopRecorder; friend class OopRecorder;
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
inline void initialize_immediate_oop(oop* dest, jobject handle);
public: public:
// Returns the space needed for CodeBlob // Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size); static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@ -115,14 +110,11 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
address instructions_end() const { return (address) header_begin() + _data_offset; } address instructions_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; } address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; } address data_end() const { return (address) header_begin() + _size; }
oop* oops_begin() const { return (oop*) (header_begin() + _oops_offset); }
oop* oops_end() const { return oops_begin() + _oops_length; }
// Offsets // Offsets
int relocation_offset() const { return _header_size; } int relocation_offset() const { return _header_size; }
int instructions_offset() const { return _instructions_offset; } int instructions_offset() const { return _instructions_offset; }
int data_offset() const { return _data_offset; } int data_offset() const { return _data_offset; }
int oops_offset() const { return _oops_offset; }
// Sizes // Sizes
int size() const { return _size; } int size() const { return _size; }
@ -130,40 +122,16 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); } int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int instructions_size() const { return instructions_end() - instructions_begin(); } int instructions_size() const { return instructions_end() - instructions_begin(); }
int data_size() const { return data_end() - data_begin(); } int data_size() const { return data_end() - data_begin(); }
int oops_size() const { return (address) oops_end() - (address) oops_begin(); }
// Containment // Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); } bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); } bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); } bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); } bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); }
bool oops_contains(oop* addr) const { return oops_begin() <= addr && addr < oops_end(); }
bool contains(address addr) const { return instructions_contains(addr); } bool contains(address addr) const { return instructions_contains(addr); }
bool is_frame_complete_at(address addr) const { return instructions_contains(addr) && bool is_frame_complete_at(address addr) const { return instructions_contains(addr) &&
addr >= instructions_begin() + _frame_complete_offset; } addr >= instructions_begin() + _frame_complete_offset; }
// Relocation support
void fix_oop_relocations(address begin, address end) {
fix_oop_relocations(begin, end, false);
}
void fix_oop_relocations() {
fix_oop_relocations(NULL, NULL, false);
}
relocInfo::relocType reloc_type_for_address(address pc);
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0? (oop)NULL: *oop_addr_at(index); }
oop* oop_addr_at(int index) const{ // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
return &oops_begin()[index-1];
}
void copy_oops(GrowableArray<jobject>* oops);
// CodeCache support: really only used by the nmethods, but in order to get // CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined // asserts and certain bookkeeping to work in the CodeCache they are defined
// virtual here. // virtual here.
@ -175,12 +143,6 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
// GC support // GC support
virtual bool is_alive() const = 0; virtual bool is_alive() const = 0;
virtual void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred);
virtual void oops_do(OopClosure* f) = 0;
// (All CodeBlob subtypes other than NMethod currently have
// an empty oops_do() method.
// OopMap for frame // OopMap for frame
OopMapSet* oop_maps() const { return _oop_maps; } OopMapSet* oop_maps() const { return _oop_maps; }
@ -245,11 +207,6 @@ class BufferBlob: public CodeBlob {
// GC/Verification support // GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; } bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do nothing */ }
void oops_do(OopClosure* f) { /* do nothing*/ }
void verify(); void verify();
void print() const PRODUCT_RETURN; void print() const PRODUCT_RETURN;
@ -334,10 +291,6 @@ class RuntimeStub: public CodeBlob {
// GC/Verification support // GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
bool is_alive() const { return true; } bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do nothing */ }
void oops_do(OopClosure* f) { /* do-nothing*/ }
void verify(); void verify();
void print() const PRODUCT_RETURN; void print() const PRODUCT_RETURN;
@ -363,9 +316,6 @@ class SingletonBlob: public CodeBlob {
{}; {};
bool is_alive() const { return true; } bool is_alive() const { return true; }
void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred) { /* do-nothing*/ }
void verify(); // does nothing void verify(); // does nothing
void print() const PRODUCT_RETURN; void print() const PRODUCT_RETURN;
@ -423,9 +373,6 @@ class DeoptimizationBlob: public SingletonBlob {
// GC for args // GC for args
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
// Iteration
void oops_do(OopClosure* f) {}
// Printing // Printing
void print_value_on(outputStream* st) const PRODUCT_RETURN; void print_value_on(outputStream* st) const PRODUCT_RETURN;
@ -477,9 +424,6 @@ class UncommonTrapBlob: public SingletonBlob {
// Typing // Typing
bool is_uncommon_trap_stub() const { return true; } bool is_uncommon_trap_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
}; };
@ -512,9 +456,6 @@ class ExceptionBlob: public SingletonBlob {
// Typing // Typing
bool is_exception_stub() const { return true; } bool is_exception_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
}; };
#endif // COMPILER2 #endif // COMPILER2
@ -548,7 +489,4 @@ class SafepointBlob: public SingletonBlob {
// Typing // Typing
bool is_safepoint_stub() const { return true; } bool is_safepoint_stub() const { return true; }
// Iteration
void oops_do(OopClosure* f) {}
}; };

View file

@ -74,12 +74,12 @@ class CodeBlob_sizes {
total_size += cb->size(); total_size += cb->size();
header_size += cb->header_size(); header_size += cb->header_size();
relocation_size += cb->relocation_size(); relocation_size += cb->relocation_size();
scopes_oop_size += cb->oops_size();
if (cb->is_nmethod()) { if (cb->is_nmethod()) {
nmethod *nm = (nmethod*)cb; nmethod* nm = cb->as_nmethod_or_null();
code_size += nm->code_size(); code_size += nm->code_size();
stub_size += nm->stub_size(); stub_size += nm->stub_size();
scopes_oop_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size(); scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size(); scopes_pcs_size += nm->scopes_pcs_size();
} else { } else {
@ -262,14 +262,14 @@ int CodeCache::alignment_offset() {
} }
// Mark code blobs for unloading if they contain otherwise // Mark nmethods for unloading if they contain otherwise unreachable
// unreachable oops. // oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, void CodeCache::do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive, OopClosure* keep_alive,
bool unloading_occurred) { bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) { FOR_ALL_ALIVE_NMETHODS(nm) {
cb->do_unloading(is_alive, keep_alive, unloading_occurred); nm->do_unloading(is_alive, keep_alive, unloading_occurred);
} }
} }
@ -509,9 +509,9 @@ void CodeCache::gc_epilogue() {
if (needs_cache_clean()) { if (needs_cache_clean()) {
nm->cleanup_inline_caches(); nm->cleanup_inline_caches();
} }
debug_only(nm->verify();) DEBUG_ONLY(nm->verify());
nm->fix_oop_relocations();
} }
cb->fix_oop_relocations();
} }
set_needs_cache_clean(false); set_needs_cache_clean(false);
prune_scavenge_root_nmethods(); prune_scavenge_root_nmethods();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -441,11 +441,11 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
} }
inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) { inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
address first_oop = NULL; address first_oop = NULL;
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter. // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
CodeBlob *code1 = code; nmethod* tmp_nm = nm;
return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized); return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
} }
CompiledIC::CompiledIC(NativeCall* ic_call) CompiledIC::CompiledIC(NativeCall* ic_call)

View file

@ -99,12 +99,12 @@ struct nmethod_stats_struct {
code_size += nm->code_size(); code_size += nm->code_size();
stub_size += nm->stub_size(); stub_size += nm->stub_size();
consts_size += nm->consts_size(); consts_size += nm->consts_size();
oops_size += nm->oops_size();
scopes_data_size += nm->scopes_data_size(); scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size(); scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size(); dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size(); handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size(); nul_chk_table_size += nm->nul_chk_table_size();
oops_size += nm->oops_size();
} }
void print_nmethod_stats() { void print_nmethod_stats() {
if (nmethod_count == 0) return; if (nmethod_count == 0) return;
@ -114,12 +114,12 @@ struct nmethod_stats_struct {
if (code_size != 0) tty->print_cr(" main code = %d", code_size); if (code_size != 0) tty->print_cr(" main code = %d", code_size);
if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);
if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);
if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size); if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
} }
int native_nmethod_count; int native_nmethod_count;
@ -600,7 +600,8 @@ nmethod::nmethod(
#endif // def HAVE_DTRACE_H #endif // def HAVE_DTRACE_H
_stub_offset = data_offset(); _stub_offset = data_offset();
_consts_offset = data_offset(); _consts_offset = data_offset();
_scopes_data_offset = data_offset(); _oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset; _scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset; _dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset; _handler_table_offset = _dependencies_offset;
@ -690,7 +691,8 @@ nmethod::nmethod(
_orig_pc_offset = 0; _orig_pc_offset = 0;
_stub_offset = data_offset(); _stub_offset = data_offset();
_consts_offset = data_offset(); _consts_offset = data_offset();
_scopes_data_offset = data_offset(); _oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_pcs_offset = _scopes_data_offset; _scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset; _dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset; _handler_table_offset = _dependencies_offset;
@ -805,7 +807,8 @@ nmethod::nmethod(
_unwind_handler_offset = -1; _unwind_handler_offset = -1;
} }
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start()); _consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
_scopes_data_offset = data_offset(); _oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize);
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize); _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize); _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
@ -990,6 +993,79 @@ void nmethod::set_version(int v) {
} }
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
// As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
(*dest) = (oop) handle;
} else {
(*dest) = JNIHandles::resolve_non_null(handle);
}
}
void nmethod::copy_oops(GrowableArray<jobject>* array) {
//assert(oops_size() == 0, "do this handshake just once, please");
int length = array->length();
assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
// Now we can fix up all the oops in the code. We need to do this
// in the code because the assembler uses jobjects as placeholders.
// The code and relocations have already been initialized by the
// CodeBlob constructor, so it is valid even at this early point to
// iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
bool nmethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool nmethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
// Re-patch every oop-bearing instruction in [begin, end), in case the GC
// moved some oops.  When initialize_immediates is true, immediate oops
// still stored as assembly-time jobject placeholders are resolved first
// (see copy_oops / initialize_immediate_oop).
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* oop_reloc = iter.oop_reloc();
      if (initialize_immediates && oop_reloc->oop_is_immediate()) {
        // Promote the placeholder handle in place before refreshing.
        oop* slot = oop_reloc->oop_addr();
        initialize_immediate_oop(slot, (jobject) *slot);
      }
      // Refresh the oop-related bits of this instruction.
      oop_reloc->fix_oop_relocation();
    }
    // There must not be any interfering patches or breakpoints.
    assert(!(iter.type() == relocInfo::breakpoint_type
             && iter.breakpoint_reloc()->active()),
           "no active breakpoint");
  }
}
ScopeDesc* nmethod::scope_desc_at(address pc) { ScopeDesc* nmethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc); PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present"); guarantee(pd != NULL, "scope must be present");
@ -1266,19 +1342,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// and it hasn't already been reported for this nmethod then report it now. // and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earilier if the GC marked it for unloading). // (the event may have been reported earilier if the GC marked it for unloading).
if (state == zombie) { if (state == zombie) {
post_compiled_method_unload();
DTRACE_METHOD_UNLOAD_PROBE(method());
if (JvmtiExport::should_post_compiled_method_unload() &&
!unload_reported()) {
assert(method() != NULL, "checking");
{
HandleMark hm;
JvmtiExport::post_compiled_method_unload_at_safepoint(
method()->jmethod_id(), code_begin());
}
set_unload_reported();
}
} }
@ -1430,6 +1494,12 @@ void nmethod::post_compiled_method_load_event() {
} }
void nmethod::post_compiled_method_unload() { void nmethod::post_compiled_method_unload() {
if (unload_reported()) {
// During unloading we transition to unloaded and then to zombie
// and the unloading is reported during the first transition.
return;
}
assert(_method != NULL && !is_unloaded(), "just checking"); assert(_method != NULL && !is_unloaded(), "just checking");
DTRACE_METHOD_UNLOAD_PROBE(method()); DTRACE_METHOD_UNLOAD_PROBE(method());
@ -1439,8 +1509,7 @@ void nmethod::post_compiled_method_unload() {
if (JvmtiExport::should_post_compiled_method_unload()) { if (JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded"); assert(!unload_reported(), "already unloaded");
HandleMark hm; HandleMark hm;
JvmtiExport::post_compiled_method_unload_at_safepoint( JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
method()->jmethod_id(), code_begin());
} }
// The JVMTI CompiledMethodUnload event can be enabled or disabled at // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@ -2282,6 +2351,10 @@ void nmethod::print() const {
consts_begin(), consts_begin(),
consts_end(), consts_end(),
consts_size()); consts_size());
if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
oops_begin(),
oops_end(),
oops_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_begin(), scopes_data_begin(),
scopes_data_end(), scopes_data_end(),

View file

@ -105,6 +105,7 @@ struct nmFlags {
// [Relocation] // [Relocation]
// - relocation information // - relocation information
// - constant part (doubles, longs and floats used in nmethod) // - constant part (doubles, longs and floats used in nmethod)
// - oop table
// [Code] // [Code]
// - code body // - code body
// - exception handler // - exception handler
@ -161,6 +162,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H #endif // def HAVE_DTRACE_H
int _stub_offset; int _stub_offset;
int _consts_offset; int _consts_offset;
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _scopes_data_offset; int _scopes_data_offset;
int _scopes_pcs_offset; int _scopes_pcs_offset;
int _dependencies_offset; int _dependencies_offset;
@ -347,7 +349,10 @@ class nmethod : public CodeBlob {
address stub_begin () const { return header_begin() + _stub_offset ; } address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _consts_offset ; } address stub_end () const { return header_begin() + _consts_offset ; }
address consts_begin () const { return header_begin() + _consts_offset ; } address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return header_begin() + _scopes_data_offset ; } address consts_end () const { return header_begin() + _oops_offset ; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; } address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); } PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
@ -359,9 +364,11 @@ class nmethod : public CodeBlob {
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; } address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
// Sizes
int code_size () const { return code_end () - code_begin (); } int code_size () const { return code_end () - code_begin (); }
int stub_size () const { return stub_end () - stub_begin (); } int stub_size () const { return stub_end () - stub_begin (); }
int consts_size () const { return consts_end () - consts_begin (); } int consts_size () const { return consts_end () - consts_begin (); }
int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); } int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); } int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); } int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
@ -370,9 +377,11 @@ class nmethod : public CodeBlob {
int total_size () const; int total_size () const;
// Containment
bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); } bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); } bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); } bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@ -431,6 +440,29 @@ class nmethod : public CodeBlob {
int version() const { return flags.version; } int version() const { return flags.version; }
void set_version(int v); void set_version(int v);
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
oop* oop_addr_at(int index) const { // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
return &oops_begin()[index - 1];
}
void copy_oops(GrowableArray<jobject>* oops);
// Relocation support
private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
inline void initialize_immediate_oop(oop* dest, jobject handle);
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Non-perm oop support // Non-perm oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; } bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
protected: protected:
@ -511,7 +543,7 @@ class nmethod : public CodeBlob {
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f); OopClosure* f);
virtual void oops_do(OopClosure* f) { oops_do(f, false); } void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool do_strong_roots_only); void oops_do(OopClosure* f, bool do_strong_roots_only);
bool detect_scavenge_root_oops(); bool detect_scavenge_root_oops();
void verify_scavenge_root_oops() PRODUCT_RETURN; void verify_scavenge_root_oops() PRODUCT_RETURN;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -50,10 +50,10 @@ int OopRecorder::oop_size() {
return _handles->length() * sizeof(oop); return _handles->length() * sizeof(oop);
} }
void OopRecorder::copy_to(CodeBlob* code) { void OopRecorder::copy_to(nmethod* nm) {
assert(_complete, "must be frozen"); assert(_complete, "must be frozen");
maybe_initialize(); // get non-null handles, even if we have no oops maybe_initialize(); // get non-null handles, even if we have no oops
code->copy_oops(_handles); nm->copy_oops(_handles);
} }
void OopRecorder::maybe_initialize() { void OopRecorder::maybe_initialize() {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,8 +70,8 @@ class OopRecorder : public ResourceObj {
return _handles->length() + first_index; return _handles->length() + first_index;
} }
// copy the generated oop table to CodeBlob // copy the generated oop table to nmethod
void copy_to(CodeBlob* code); // => code->copy_oops(_handles) void copy_to(nmethod* nm); // => nm->copy_oops(_handles)
bool is_unused() { return _handles == NULL && !_complete; } bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT #ifdef ASSERT

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -115,24 +115,25 @@ void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, re
// ---------------------------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator // Implementation of RelocIterator
void RelocIterator::initialize(CodeBlob* cb, address begin, address limit) { void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
initialize_misc(); initialize_misc();
if (cb == NULL && begin != NULL) { if (nm == NULL && begin != NULL) {
// allow CodeBlob to be deduced from beginning address // allow nmethod to be deduced from beginning address
cb = CodeCache::find_blob(begin); CodeBlob* cb = CodeCache::find_blob(begin);
nm = cb->as_nmethod_or_null();
} }
assert(cb != NULL, "must be able to deduce nmethod from other arguments"); assert(nm != NULL, "must be able to deduce nmethod from other arguments");
_code = cb; _code = nm;
_current = cb->relocation_begin()-1; _current = nm->relocation_begin() - 1;
_end = cb->relocation_end(); _end = nm->relocation_end();
_addr = (address) cb->instructions_begin(); _addr = (address) nm->instructions_begin();
assert(!has_current(), "just checking"); assert(!has_current(), "just checking");
address code_end = cb->instructions_end(); address code_end = nm->instructions_end();
assert(begin == NULL || begin >= cb->instructions_begin(), "in bounds"); assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
// FIX THIS assert(limit == NULL || limit <= code_end, "in bounds"); // FIX THIS assert(limit == NULL || limit <= code_end, "in bounds");
set_limits(begin, limit); set_limits(begin, limit);
} }
@ -754,7 +755,7 @@ oop* oop_Relocation::oop_addr() {
// oop is stored in the code stream // oop is stored in the code stream
return (oop*) pd_address_in_code(); return (oop*) pd_address_in_code();
} else { } else {
// oop is stored in table at CodeBlob::oops_begin // oop is stored in table at nmethod::oops_begin
return code()->oop_addr_at(n); return code()->oop_addr_at(n);
} }
} }
@ -776,26 +777,28 @@ void oop_Relocation::fix_oop_relocation() {
} }
RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
oop* &oop_addr, bool *is_optimized) { oop* &oop_addr, bool *is_optimized) {
assert(ic_call != NULL, "ic_call address must be set"); assert(ic_call != NULL, "ic_call address must be set");
assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input"); assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
if (code == NULL) { if (nm == NULL) {
CodeBlob* code;
if (ic_call != NULL) { if (ic_call != NULL) {
code = CodeCache::find_blob(ic_call); code = CodeCache::find_blob(ic_call);
} else if (first_oop != NULL) { } else if (first_oop != NULL) {
code = CodeCache::find_blob(first_oop); code = CodeCache::find_blob(first_oop);
} }
assert(code != NULL, "address to parse must be in CodeBlob"); nm = code->as_nmethod_or_null();
assert(nm != NULL, "address to parse must be in nmethod");
} }
assert(ic_call == NULL || code->contains(ic_call), "must be in CodeBlob"); assert(ic_call == NULL || nm->contains(ic_call), "must be in nmethod");
assert(first_oop == NULL || code->contains(first_oop), "must be in CodeBlob"); assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
address oop_limit = NULL; address oop_limit = NULL;
if (ic_call != NULL) { if (ic_call != NULL) {
// search for the ic_call at the given address // search for the ic_call at the given address
RelocIterator iter(code, ic_call, ic_call+1); RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next(); bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address"); assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call"); assert(iter.addr() == ic_call, "must find ic_call");
@ -814,7 +817,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
} }
// search for the first_oop, to get its oop_addr // search for the first_oop, to get its oop_addr
RelocIterator all_oops(code, first_oop); RelocIterator all_oops(nm, first_oop);
RelocIterator iter = all_oops; RelocIterator iter = all_oops;
iter.set_limit(first_oop+1); iter.set_limit(first_oop+1);
bool found_oop = false; bool found_oop = false;
@ -842,7 +845,7 @@ RelocIterator virtual_call_Relocation::parse_ic(CodeBlob* &code, address &ic_cal
} }
} }
guarantee(!did_reset, "cannot find ic_call"); guarantee(!did_reset, "cannot find ic_call");
iter = RelocIterator(code); // search the whole CodeBlob iter = RelocIterator(nm); // search the whole nmethod
did_reset = true; did_reset = true;
} }
@ -1175,9 +1178,9 @@ void RelocIterator::print() {
// For the debugger: // For the debugger:
extern "C" extern "C"
void print_blob_locs(CodeBlob* cb) { void print_blob_locs(nmethod* nm) {
cb->print(); nm->print();
RelocIterator iter(cb); RelocIterator iter(nm);
iter.print(); iter.print();
} }
extern "C" extern "C"

View file

@ -512,7 +512,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end relocInfo* _end; // end marker; we're done iterating when _current == _end
CodeBlob* _code; // compiled method containing _addr nmethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data short* _data; // pointer to the relocation's data
@ -549,7 +549,7 @@ class RelocIterator : public StackObj {
address compute_section_start(int n) const; // out-of-line helper address compute_section_start(int n) const; // out-of-line helper
void initialize(CodeBlob* nm, address begin, address limit); void initialize(nmethod* nm, address begin, address limit);
friend class PatchingRelocIterator; friend class PatchingRelocIterator;
// make an uninitialized one, for PatchingRelocIterator: // make an uninitialized one, for PatchingRelocIterator:
@ -557,7 +557,7 @@ class RelocIterator : public StackObj {
public: public:
// constructor // constructor
RelocIterator(CodeBlob* cb, address begin = NULL, address limit = NULL); RelocIterator(nmethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL); RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
// get next reloc info, return !eos // get next reloc info, return !eos
@ -592,7 +592,7 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); } relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; } int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; } address addr() const { return _addr; }
CodeBlob* code() const { return _code; } nmethod* code() const { return _code; }
short* data() const { return _data; } short* data() const { return _data; }
int datalen() const { return _datalen; } int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; } bool has_current() const { return _datalen >= 0; }
@ -791,7 +791,7 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
public: public:
// accessors which only make sense for a bound Relocation // accessors which only make sense for a bound Relocation
address addr() const { return binding()->addr(); } address addr() const { return binding()->addr(); }
CodeBlob* code() const { return binding()->code(); } nmethod* code() const { return binding()->code(); }
bool addr_in_const() const { return binding()->addr_in_const(); } bool addr_in_const() const { return binding()->addr_in_const(); }
protected: protected:
short* data() const { return binding()->data(); } short* data() const { return binding()->data(); }
@ -982,12 +982,12 @@ class virtual_call_Relocation : public CallRelocation {
// Figure out where an ic_call is hiding, given a set-oop or call. // Figure out where an ic_call is hiding, given a set-oop or call.
// Either ic_call or first_oop must be non-null; the other is deduced. // Either ic_call or first_oop must be non-null; the other is deduced.
// Code if non-NULL must be the CodeBlob, else it is deduced. // Code if non-NULL must be the nmethod, else it is deduced.
// The address of the patchable oop is also deduced. // The address of the patchable oop is also deduced.
// The returned iterator will enumerate over the oops and the ic_call, // The returned iterator will enumerate over the oops and the ic_call,
// as well as any other relocations that happen to be in that span of code. // as well as any other relocations that happen to be in that span of code.
// Recognize relevant set_oops with: oop_reloc()->oop_addr() == oop_addr. // Recognize relevant set_oops with: oop_reloc()->oop_addr() == oop_addr.
static RelocIterator parse_ic(CodeBlob* &code, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized); static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
}; };
@ -1304,8 +1304,8 @@ inline name##_Relocation* RelocIterator::name##_reloc() { \
APPLY_TO_RELOCATIONS(EACH_CASE); APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE #undef EACH_CASE
inline RelocIterator::RelocIterator(CodeBlob* cb, address begin, address limit) { inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
initialize(cb, begin, limit); initialize(nm, begin, limit);
} }
// if you are going to patch code, you should use this subclass of // if you are going to patch code, you should use this subclass of
@ -1323,8 +1323,8 @@ class PatchingRelocIterator : public RelocIterator {
void operator=(const RelocIterator&); void operator=(const RelocIterator&);
public: public:
PatchingRelocIterator(CodeBlob* cb, address begin =NULL, address limit =NULL) PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
: RelocIterator(cb, begin, limit) { prepass(); } : RelocIterator(nm, begin, limit) { prepass(); }
~PatchingRelocIterator() { postpass(); } ~PatchingRelocIterator() { postpass(); }
}; };

View file

@ -1651,8 +1651,9 @@ void CompileBroker::handle_full_code_cache() {
log->stamp(); log->stamp();
log->end_elem(); log->end_elem();
} }
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
#ifndef PRODUCT #ifndef PRODUCT
warning("CodeCache is full. Compiler has been disabled");
if (CompileTheWorld || ExitOnFullCodeCache) { if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current()); before_exit(JavaThread::current());
exit_globals(); // will delete tty exit_globals(); // will delete tty

View file

@ -32,6 +32,23 @@
// highest ranked free list lock rank // highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3; int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
// Defaults are 0 so things will break badly if incorrectly initialized.
int CompactibleFreeListSpace::IndexSetStart = 0;
int CompactibleFreeListSpace::IndexSetStride = 0;
size_t MinChunkSize = 0;
void CompactibleFreeListSpace::set_cms_values() {
// Set CMS global values
assert(MinChunkSize == 0, "already set");
#define numQuanta(x,y) ((x+y-1)/y)
MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
IndexSetStart = MinObjAlignment;
IndexSetStride = MinObjAlignment;
}
// Constructor // Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
MemRegion mr, bool use_adaptive_freelists, MemRegion mr, bool use_adaptive_freelists,
@ -302,7 +319,7 @@ size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const { size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
size_t count = 0; size_t count = 0;
for (int i = MinChunkSize; i < IndexSetSize; i++) { for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
debug_only( debug_only(
ssize_t total_list_count = 0; ssize_t total_list_count = 0;
for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;

View file

@ -91,10 +91,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
enum SomeConstants { enum SomeConstants {
SmallForLinearAlloc = 16, // size < this then use _sLAB SmallForLinearAlloc = 16, // size < this then use _sLAB
SmallForDictionary = 257, // size < this then use _indexedFreeList SmallForDictionary = 257, // size < this then use _indexedFreeList
IndexSetSize = SmallForDictionary, // keep this odd-sized IndexSetSize = SmallForDictionary // keep this odd-sized
IndexSetStart = MinObjAlignment,
IndexSetStride = MinObjAlignment
}; };
static int IndexSetStart;
static int IndexSetStride;
private: private:
enum FitStrategyOptions { enum FitStrategyOptions {
@ -278,6 +278,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
HeapWord* nearLargestChunk() const { return _nearLargestChunk; } HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
// Set CMS global values
static void set_cms_values();
// Return the free chunk at the end of the space. If no such // Return the free chunk at the end of the space. If no such
// chunk exists, return NULL. // chunk exists, return NULL.
FreeChunk* find_chunk_at_end(); FreeChunk* find_chunk_at_end();

View file

@ -159,7 +159,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
CardTableRS* ct, bool use_adaptive_freelists, CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) : FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct), CardGeneration(rs, initial_byte_size, level, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type) _debug_collection_type(Concurrent_collection_type)
{ {
HeapWord* bottom = (HeapWord*) _virtual_space.low(); HeapWord* bottom = (HeapWord*) _virtual_space.low();
@ -222,7 +222,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// promoting generation, we'll instead just use the mimimum // promoting generation, we'll instead just use the mimimum
// object size (which today is a header's worth of space); // object size (which today is a header's worth of space);
// note that all arithmetic is in units of HeapWords. // note that all arithmetic is in units of HeapWords.
assert(MinChunkSize >= oopDesc::header_size(), "just checking"); assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert"); assert(_dilatation_factor >= 1.0, "from previous assert");
} }

View file

@ -133,9 +133,5 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st); void print_on(outputStream* st);
}; };
// Alignment helpers etc. extern size_t MinChunkSize;
#define numQuanta(x,y) ((x+y-1)/y)
enum AlignmentConstants {
MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
};

View file

@ -3644,7 +3644,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
do { do {
free_words = r->free()/HeapWordSize; free_words = r->free()/HeapWordSize;
// If there's too little space, no one can allocate, so we're done. // If there's too little space, no one can allocate, so we're done.
if (free_words < (size_t)oopDesc::header_size()) return; if (free_words < CollectedHeap::min_fill_size()) return;
// Otherwise, try to claim it. // Otherwise, try to claim it.
block = r->par_allocate(free_words); block = r->par_allocate(free_words);
} while (block == NULL); } while (block == NULL);

View file

@ -2523,14 +2523,14 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
} }
if (ParallelGCThreads > 0) { if (ParallelGCThreads > 0) {
const size_t OverpartitionFactor = 4; const size_t OverpartitionFactor = 4;
const size_t MinChunkSize = 8; const size_t MinWorkUnit = 8;
const size_t ChunkSize = const size_t WorkUnit =
MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
MinChunkSize); MinWorkUnit);
_collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
ChunkSize); WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
(int) ChunkSize); (int) WorkUnit);
_g1->workers()->run_task(&parKnownGarbageTask); _g1->workers()->run_task(&parKnownGarbageTask);
assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),

View file

@ -711,6 +711,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
// object in the region. // object in the region.
if (region_ptr->data_size() == RegionSize) { if (region_ptr->data_size() == RegionSize) {
result += pointer_delta(addr, region_addr); result += pointer_delta(addr, region_addr);
DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
return result; return result;
} }
@ -1487,13 +1488,14 @@ PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
space->set_top_for_allocations(); space->set_top_for_allocations();
} }
size_t obj_len = 8; size_t min_size = CollectedHeap::min_fill_size();
size_t obj_len = min_size;
while (b + obj_len <= t) { while (b + obj_len <= t) {
CollectedHeap::fill_with_object(b, obj_len); CollectedHeap::fill_with_object(b, obj_len);
mark_bitmap()->mark_obj(b, obj_len); mark_bitmap()->mark_obj(b, obj_len);
summary_data().add_obj(b, obj_len); summary_data().add_obj(b, obj_len);
b += obj_len; b += obj_len;
obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ... obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
} }
if (b < t) { if (b < t) {
// The loop didn't completely fill to t (top); adjust top downward. // The loop didn't completely fill to t (top); adjust top downward.
@ -1680,11 +1682,13 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
// +-------+ // +-------+
// Initially assume case a, c or e will apply. // Initially assume case a, c or e will apply.
size_t obj_len = (size_t)oopDesc::header_size(); size_t obj_len = CollectedHeap::min_fill_size();
HeapWord* obj_beg = dense_prefix_end - obj_len; HeapWord* obj_beg = dense_prefix_end - obj_len;
#ifdef _LP64 #ifdef _LP64
if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) { if (MinObjAlignment > 1) { // object alignment > heap word size
// Cases a, c or e.
} else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
// Case b above. // Case b above.
obj_beg = dense_prefix_end - 1; obj_beg = dense_prefix_end - 1;
} else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) && } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&

View file

@ -1414,6 +1414,8 @@ PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
{ {
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr), assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
"must move left or to a different space"); "must move left or to a different space");
assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
"checking alignment");
} }
#endif // ASSERT #endif // ASSERT

View file

@ -761,7 +761,7 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
if (p != NULL) { if (p != NULL) {
size_t remainder = s->free_in_words(); size_t remainder = s->free_in_words();
if (remainder < (size_t)oopDesc::header_size() && remainder > 0) { if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
s->set_top(s->top() - size); s->set_top(s->top() - size);
p = NULL; p = NULL;
} }
@ -803,7 +803,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
HeapWord *p = s->cas_allocate(size); HeapWord *p = s->cas_allocate(size);
if (p != NULL) { if (p != NULL) {
size_t remainder = pointer_delta(s->end(), p + size); size_t remainder = pointer_delta(s->end(), p + size);
if (remainder < (size_t)oopDesc::header_size() && remainder > 0) { if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
if (s->cas_deallocate(p, size)) { if (s->cas_deallocate(p, size)) {
// We were the last to allocate and created a fragment less than // We were the last to allocate and created a fragment less than
// a minimal object. // a minimal object.

View file

@ -239,11 +239,11 @@ oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
} }
size_t CollectedHeap::filler_array_hdr_size() { size_t CollectedHeap::filler_array_hdr_size() {
return size_t(arrayOopDesc::header_size(T_INT)); return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
} }
size_t CollectedHeap::filler_array_min_size() { size_t CollectedHeap::filler_array_min_size() {
return align_object_size(filler_array_hdr_size()); return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
} }
size_t CollectedHeap::filler_array_max_size() { size_t CollectedHeap::filler_array_max_size() {

View file

@ -827,6 +827,7 @@ ciStreams.cpp ciField.hpp
ciStreams.cpp ciStreams.hpp ciStreams.cpp ciStreams.hpp
ciStreams.cpp ciUtilities.hpp ciStreams.cpp ciUtilities.hpp
ciStreams.hpp bytecode.hpp
ciStreams.hpp ciClassList.hpp ciStreams.hpp ciClassList.hpp
ciStreams.hpp ciExceptionHandler.hpp ciStreams.hpp ciExceptionHandler.hpp
ciStreams.hpp ciInstanceKlass.hpp ciStreams.hpp ciInstanceKlass.hpp
@ -3635,6 +3636,7 @@ rewriter.cpp bytecodes.hpp
rewriter.cpp gcLocker.hpp rewriter.cpp gcLocker.hpp
rewriter.cpp generateOopMap.hpp rewriter.cpp generateOopMap.hpp
rewriter.cpp interpreter.hpp rewriter.cpp interpreter.hpp
rewriter.cpp methodComparator.hpp
rewriter.cpp objArrayOop.hpp rewriter.cpp objArrayOop.hpp
rewriter.cpp oop.inline.hpp rewriter.cpp oop.inline.hpp
rewriter.cpp oopFactory.hpp rewriter.cpp oopFactory.hpp

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,19 +26,12 @@
#include "incls/_bytecode.cpp.incl" #include "incls/_bytecode.cpp.incl"
// Implementation of Bytecode // Implementation of Bytecode
// Should eventually get rid of these functions and use ThisRelativeObj methods instead
void Bytecode::set_code(Bytecodes::Code code) { bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
Bytecodes::check(code); assert(Bytecodes::can_rewrite(code), "post-check only");
*addr_at(0) = u_char(code);
}
bool Bytecode::check_must_rewrite() const {
assert(Bytecodes::can_rewrite(code()), "post-check only");
// Some codes are conditionally rewriting. Look closely at them. // Some codes are conditionally rewriting. Look closely at them.
switch (code()) { switch (code) {
case Bytecodes::_aload_0: case Bytecodes::_aload_0:
// Even if RewriteFrequentPairs is turned on, // Even if RewriteFrequentPairs is turned on,
// the _aload_0 code might delay its rewrite until // the _aload_0 code might delay its rewrite until
@ -58,14 +51,85 @@ bool Bytecode::check_must_rewrite() const {
} }
#ifdef ASSERT
void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
Bytecodes::Code thisbc = Bytecodes::cast(byte_at(0));
if (thisbc == Bytecodes::_breakpoint) return; // let the assertion fail silently
if (is_wide) {
assert(thisbc == Bytecodes::_wide, "expected a wide instruction");
thisbc = Bytecodes::cast(byte_at(1));
if (thisbc == Bytecodes::_breakpoint) return;
}
int thisflags = Bytecodes::flags(testbc, is_wide) & Bytecodes::_all_fmt_bits;
int testflags = Bytecodes::flags(thisbc, is_wide) & Bytecodes::_all_fmt_bits;
if (thisflags != testflags)
tty->print_cr("assert_same_format_as(%d) failed on bc=%d%s; %d != %d",
(int)testbc, (int)thisbc, (is_wide?"/wide":""), testflags, thisflags);
assert(thisflags == testflags, "expected format");
}
void Bytecode::assert_index_size(int size, Bytecodes::Code bc, bool is_wide) {
int have_fmt = (Bytecodes::flags(bc, is_wide)
& (Bytecodes::_fmt_has_u2 | Bytecodes::_fmt_has_u4 |
Bytecodes::_fmt_not_simple |
// Not an offset field:
Bytecodes::_fmt_has_o));
int need_fmt = -1;
switch (size) {
case 1: need_fmt = 0; break;
case 2: need_fmt = Bytecodes::_fmt_has_u2; break;
case 4: need_fmt = Bytecodes::_fmt_has_u4; break;
}
if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
if (have_fmt != need_fmt) {
tty->print_cr("assert_index_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
assert(have_fmt == need_fmt, "assert_index_size");
}
}
void Bytecode::assert_offset_size(int size, Bytecodes::Code bc, bool is_wide) {
int have_fmt = Bytecodes::flags(bc, is_wide) & Bytecodes::_all_fmt_bits;
int need_fmt = -1;
switch (size) {
case 2: need_fmt = Bytecodes::_fmt_bo2; break;
case 4: need_fmt = Bytecodes::_fmt_bo4; break;
}
if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
if (have_fmt != need_fmt) {
tty->print_cr("assert_offset_size %d: bc=%d%s %d != %d", size, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
assert(have_fmt == need_fmt, "assert_offset_size");
}
}
void Bytecode::assert_constant_size(int size, int where, Bytecodes::Code bc, bool is_wide) {
int have_fmt = Bytecodes::flags(bc, is_wide) & (Bytecodes::_all_fmt_bits
// Ignore any 'i' field (for iinc):
& ~Bytecodes::_fmt_has_i);
int need_fmt = -1;
switch (size) {
case 1: need_fmt = Bytecodes::_fmt_bc; break;
case 2: need_fmt = Bytecodes::_fmt_bc | Bytecodes::_fmt_has_u2; break;
}
if (is_wide) need_fmt |= Bytecodes::_fmt_not_simple;
int length = is_wide ? Bytecodes::wide_length_for(bc) : Bytecodes::length_for(bc);
if (have_fmt != need_fmt || where + size != length) {
tty->print_cr("assert_constant_size %d @%d: bc=%d%s %d != %d", size, where, bc, (is_wide?"/wide":""), have_fmt, need_fmt);
}
assert(have_fmt == need_fmt, "assert_constant_size");
assert(where + size == length, "assert_constant_size oob");
}
void Bytecode::assert_native_index(Bytecodes::Code bc, bool is_wide) {
assert((Bytecodes::flags(bc, is_wide) & Bytecodes::_fmt_has_nbo) != 0, "native index");
}
#endif //ASSERT
// Implementation of Bytecode_tableupswitch // Implementation of Bytecode_tableupswitch
int Bytecode_tableswitch::dest_offset_at(int i) const { int Bytecode_tableswitch::dest_offset_at(int i) const {
address x = aligned_addr_at(1); return get_Java_u4_at(aligned_offset(1 + (3 + i)*jintSize));
int x2 = aligned_offset(1 + (3 + i)*jintSize);
int val = java_signed_word_at(x2);
return java_signed_word_at(aligned_offset(1 + (3 + i)*jintSize));
} }
@ -74,6 +138,7 @@ int Bytecode_tableswitch::dest_offset_at(int i) const {
void Bytecode_invoke::verify() const { void Bytecode_invoke::verify() const {
Bytecodes::Code bc = adjusted_invoke_code(); Bytecodes::Code bc = adjusted_invoke_code();
assert(is_valid(), "check invoke"); assert(is_valid(), "check invoke");
assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
} }
@ -116,27 +181,12 @@ methodHandle Bytecode_invoke::static_target(TRAPS) {
int Bytecode_invoke::index() const { int Bytecode_invoke::index() const {
// Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
// at the same time it allocates per-call-site CP cache entries. // at the same time it allocates per-call-site CP cache entries.
if (has_giant_index()) Bytecodes::Code stdc = Bytecodes::java_code(code());
return Bytes::get_native_u4(bcp() + 1); Bytecode* invoke = Bytecode_at(bcp());
if (invoke->has_index_u4(stdc))
return invoke->get_index_u4(stdc);
else else
return Bytes::get_Java_u2(bcp() + 1); return invoke->get_index_u2_cpcache(stdc);
}
// Implementation of Bytecode_static
void Bytecode_static::verify() const {
assert(Bytecodes::java_code(code()) == Bytecodes::_putstatic
|| Bytecodes::java_code(code()) == Bytecodes::_getstatic, "check static");
}
BasicType Bytecode_static::result_type(methodOop method) const {
int index = java_hwrd_at(1);
constantPoolOop constants = method->constants();
symbolOop field_type = constants->signature_ref_at(index);
BasicType basic_type = FieldType::basic_type(field_type);
return basic_type;
} }
@ -156,7 +206,8 @@ bool Bytecode_field::is_static() const {
int Bytecode_field::index() const { int Bytecode_field::index() const {
return java_hwrd_at(1); Bytecode* invoke = Bytecode_at(bcp());
return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
} }
@ -164,7 +215,14 @@ int Bytecode_field::index() const {
int Bytecode_loadconstant::index() const { int Bytecode_loadconstant::index() const {
Bytecodes::Code stdc = Bytecodes::java_code(code()); Bytecodes::Code stdc = Bytecodes::java_code(code());
return stdc == Bytecodes::_ldc ? java_byte_at(1) : java_hwrd_at(1); if (stdc != Bytecodes::_wide) {
if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
return get_index_u1(stdc);
else
return get_index_u2(stdc, false);
}
stdc = Bytecodes::code_at(addr_at(1));
return get_index_u2(stdc, true);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,92 +26,100 @@
// relative to an objects 'this' pointer. // relative to an objects 'this' pointer.
class ThisRelativeObj VALUE_OBJ_CLASS_SPEC { class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
private:
int sign_extend (int x, int size) const { const int s = (BytesPerInt - size)*BitsPerByte; return (x << s) >> s; }
public: public:
// Address computation // Address computation
address addr_at (int offset) const { return (address)this + offset; } address addr_at (int offset) const { return (address)this + offset; }
int byte_at (int offset) const { return *(addr_at(offset)); }
address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); } address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); } int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); }
// Java unsigned accessors (using Java spec byte ordering) // Word access:
int java_byte_at (int offset) const { return *(jubyte*)addr_at(offset); } int get_Java_u2_at (int offset) const { return Bytes::get_Java_u2(addr_at(offset)); }
int java_hwrd_at (int offset) const { return java_byte_at(offset) << (1 * BitsPerByte) | java_byte_at(offset + 1); } int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
int java_word_at (int offset) const { return java_hwrd_at(offset) << (2 * BitsPerByte) | java_hwrd_at(offset + 2); } int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); }
int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); }
// Java signed accessors (using Java spec byte ordering)
int java_signed_byte_at(int offset) const { return sign_extend(java_byte_at(offset), 1); }
int java_signed_hwrd_at(int offset) const { return sign_extend(java_hwrd_at(offset), 2); }
int java_signed_word_at(int offset) const { return java_word_at(offset) ; }
// Fast accessors (using the machine's natural byte ordering)
int fast_byte_at (int offset) const { return *(jubyte *)addr_at(offset); }
int fast_hwrd_at (int offset) const { return *(jushort*)addr_at(offset); }
int fast_word_at (int offset) const { return *(juint *)addr_at(offset); }
// Fast signed accessors (using the machine's natural byte ordering)
int fast_signed_byte_at(int offset) const { return *(jbyte *)addr_at(offset); }
int fast_signed_hwrd_at(int offset) const { return *(jshort*)addr_at(offset); }
int fast_signed_word_at(int offset) const { return *(jint *)addr_at(offset); }
// Fast manipulators (using the machine's natural byte ordering)
void set_fast_byte_at (int offset, int x) const { *(jbyte *)addr_at(offset) = (jbyte )x; }
void set_fast_hwrd_at (int offset, int x) const { *(jshort*)addr_at(offset) = (jshort)x; }
void set_fast_word_at (int offset, int x) const { *(jint *)addr_at(offset) = (jint )x; }
}; };
// The base class for different kinds of bytecode abstractions. // The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative // Provides the primitive operations to manipulate code relative
// to an objects 'this' pointer. // to an objects 'this' pointer.
// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
class Bytecode: public ThisRelativeObj { class Bytecode: public ThisRelativeObj {
protected: protected:
u_char byte_at(int offset) const { return *addr_at(offset); } u_char byte_at(int offset) const { return *addr_at(offset); }
bool check_must_rewrite() const; bool check_must_rewrite(Bytecodes::Code bc) const;
public: public:
// Attributes // Attributes
address bcp() const { return addr_at(0); } address bcp() const { return addr_at(0); }
address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); }
int instruction_size() const { return Bytecodes::length_at(bcp()); } int instruction_size() const { return Bytecodes::length_at(bcp()); }
// Warning: Use code() with caution on live bytecode streams. 4926272
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); } Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); } bool must_rewrite(Bytecodes::Code code) const { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
int one_byte_index() const { assert_index_size(1); return byte_at(1); }
int two_byte_index() const { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
int offset() const { return (two_byte_index() << 16) >> 16; }
address destination() const { return bcp() + offset(); }
// Attribute modification
void set_code(Bytecodes::Code code);
// Creation // Creation
inline friend Bytecode* Bytecode_at(address bcp); inline friend Bytecode* Bytecode_at(address bcp);
private: // Static functions for parsing bytecodes in place.
void assert_index_size(int required_size) const { int get_index_u1(Bytecodes::Code bc) const {
#ifdef ASSERT assert_same_format_as(bc); assert_index_size(1, bc);
int isize = instruction_size() - 1; return *(jubyte*)addr_at(1);
if (isize == 2 && code() == Bytecodes::_iinc) }
isize = 1; int get_index_u2(Bytecodes::Code bc, bool is_wide = false) const {
else if (isize <= 2) assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
; // no change address p = addr_at(is_wide ? 2 : 1);
else if (code() == Bytecodes::_invokedynamic) if (can_use_native_byte_order(bc, is_wide))
isize = 4; return Bytes::get_native_u2(p);
else else return Bytes::get_Java_u2(p);
isize = 2; }
assert(isize = required_size, "wrong index size"); int get_index_u2_cpcache(Bytecodes::Code bc) const {
#endif assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
}
int get_index_u4(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_index_size(4, bc);
assert(can_use_native_byte_order(bc), "");
return Bytes::get_native_u4(addr_at(1));
}
bool has_index_u4(Bytecodes::Code bc) const {
return bc == Bytecodes::_invokedynamic;
}
int get_offset_s2(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_offset_size(2, bc);
return (jshort) Bytes::get_Java_u2(addr_at(1));
}
int get_offset_s4(Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_offset_size(4, bc);
return (jint) Bytes::get_Java_u4(addr_at(1));
}
int get_constant_u1(int offset, Bytecodes::Code bc) const {
assert_same_format_as(bc); assert_constant_size(1, offset, bc);
return *(jbyte*)addr_at(offset);
}
int get_constant_u2(int offset, Bytecodes::Code bc, bool is_wide = false) const {
assert_same_format_as(bc, is_wide); assert_constant_size(2, offset, bc, is_wide);
return (jshort) Bytes::get_Java_u2(addr_at(offset));
}
// These are used locally and also from bytecode streams.
void assert_same_format_as(Bytecodes::Code testbc, bool is_wide = false) const NOT_DEBUG_RETURN;
static void assert_index_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
static void assert_offset_size(int required_size, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
static void assert_constant_size(int required_size, int where, Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
static void assert_native_index(Bytecodes::Code bc, bool is_wide = false) NOT_DEBUG_RETURN;
static bool can_use_native_byte_order(Bytecodes::Code bc, bool is_wide = false) {
return (!Bytes::is_Java_byte_ordering_different() || Bytecodes::native_byte_order(bc /*, is_wide*/));
} }
}; };
inline Bytecode* Bytecode_at(address bcp) { inline Bytecode* Bytecode_at(address bcp) {
// Warning: Use with caution on live bytecode streams. 4926272
return (Bytecode*)bcp; return (Bytecode*)bcp;
} }
@ -124,8 +132,8 @@ class LookupswitchPair: ThisRelativeObj {
int _offset; int _offset;
public: public:
int match() const { return java_signed_word_at(0 * jintSize); } int match() const { return get_Java_u4_at(0 * jintSize); }
int offset() const { return java_signed_word_at(1 * jintSize); } int offset() const { return get_Java_u4_at(1 * jintSize); }
}; };
@ -134,8 +142,8 @@ class Bytecode_lookupswitch: public Bytecode {
void verify() const PRODUCT_RETURN; void verify() const PRODUCT_RETURN;
// Attributes // Attributes
int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); } int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
int number_of_pairs() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); } int number_of_pairs() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
LookupswitchPair* pair_at(int i) const { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds"); LookupswitchPair* pair_at(int i) const { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); } return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
// Creation // Creation
@ -154,9 +162,9 @@ class Bytecode_tableswitch: public Bytecode {
void verify() const PRODUCT_RETURN; void verify() const PRODUCT_RETURN;
// Attributes // Attributes
int default_offset() const { return java_signed_word_at(aligned_offset(1 + 0*jintSize)); } int default_offset() const { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
int low_key() const { return java_signed_word_at(aligned_offset(1 + 1*jintSize)); } int low_key() const { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
int high_key() const { return java_signed_word_at(aligned_offset(1 + 2*jintSize)); } int high_key() const { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
int dest_offset_at(int i) const; int dest_offset_at(int i) const;
int length() { return high_key()-low_key()+1; } int length() { return high_key()-low_key()+1; }
@ -206,7 +214,6 @@ class Bytecode_invoke: public ResourceObj {
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; } bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); } bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); }
bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() || bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() || is_invokevirtual() ||
@ -252,26 +259,6 @@ inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) {
} }
// Abstraction for {get,put}static
class Bytecode_static: public Bytecode {
public:
void verify() const;
// Returns the result type of the send by inspecting the field ref
BasicType result_type(methodOop method) const;
// Creation
inline friend Bytecode_static* Bytecode_static_at(const methodOop method, address bcp);
};
inline Bytecode_static* Bytecode_static_at(const methodOop method, address bcp) {
Bytecode_static* b = (Bytecode_static*)bcp;
debug_only(b->verify());
return b;
}
// Abstraction for checkcast // Abstraction for checkcast
class Bytecode_checkcast: public Bytecode { class Bytecode_checkcast: public Bytecode {
@ -279,7 +266,7 @@ class Bytecode_checkcast: public Bytecode {
void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); } void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
// Returns index // Returns index
long index() const { return java_hwrd_at(1); }; long index() const { return get_index_u2(Bytecodes::_checkcast); };
// Creation // Creation
inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp); inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
@ -299,7 +286,7 @@ class Bytecode_instanceof: public Bytecode {
void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); } void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
// Returns index // Returns index
long index() const { return java_hwrd_at(1); }; long index() const { return get_index_u2(Bytecodes::_instanceof); };
// Creation // Creation
inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp); inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
@ -317,7 +304,7 @@ class Bytecode_new: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_new, "check new"); } void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
// Returns index // Returns index
long index() const { return java_hwrd_at(1); }; long index() const { return get_index_u2(Bytecodes::_new); };
// Creation // Creation
inline friend Bytecode_new* Bytecode_new_at(address bcp); inline friend Bytecode_new* Bytecode_new_at(address bcp);
@ -335,7 +322,7 @@ class Bytecode_multianewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); } void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }
// Returns index // Returns index
long index() const { return java_hwrd_at(1); }; long index() const { return get_index_u2(Bytecodes::_multianewarray); };
// Creation // Creation
inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp); inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
@ -353,7 +340,7 @@ class Bytecode_anewarray: public Bytecode {
void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); } void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
// Returns index // Returns index
long index() const { return java_hwrd_at(1); }; long index() const { return get_index_u2(Bytecodes::_anewarray); };
// Creation // Creation
inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp); inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,25 @@ Bytecodes::Code RawBytecodeStream::raw_next_special(Bytecodes::Code code) {
} }
} }
} }
_code = code; _raw_code = code;
return code; return code;
} }
#ifdef ASSERT
void BaseBytecodeStream::assert_raw_index_size(int size) const {
if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
// in raw mode, pretend indy is "bJJ__"
assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
} else {
bytecode()->assert_index_size(size, raw_code(), is_wide());
}
}
void BaseBytecodeStream::assert_raw_stream(bool want_raw) const {
if (want_raw) {
assert( is_raw(), "this function only works on raw streams");
} else {
assert(!is_raw(), "this function only works on non-raw streams");
}
}
#endif //ASSERT

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,13 +32,13 @@
// while ((c = s.next()) >= 0) { // while ((c = s.next()) >= 0) {
// ... // ...
// } // }
//
// A RawBytecodeStream is a simple version of BytecodeStream. // A RawBytecodeStream is a simple version of BytecodeStream.
// It is used ONLY when we know the bytecodes haven't been rewritten // It is used ONLY when we know the bytecodes haven't been rewritten
// yet, such as in the rewriter or the verifier. Currently only the // yet, such as in the rewriter or the verifier.
// verifier uses this class.
class RawBytecodeStream: StackObj { // Here is the common base class for both RawBytecodeStream and BytecodeStream:
class BaseBytecodeStream: StackObj {
protected: protected:
// stream buffer // stream buffer
methodHandle _method; // read from method directly methodHandle _method; // read from method directly
@ -49,15 +49,17 @@ class RawBytecodeStream: StackObj {
int _end_bci; // bci after the current iteration interval int _end_bci; // bci after the current iteration interval
// last bytecode read // last bytecode read
Bytecodes::Code _code; Bytecodes::Code _raw_code;
bool _is_wide; bool _is_wide;
bool _is_raw; // false in 'cooked' BytecodeStream
public:
// Construction // Construction
RawBytecodeStream(methodHandle method) : _method(method) { BaseBytecodeStream(methodHandle method) : _method(method) {
set_interval(0, _method->code_size()); set_interval(0, _method->code_size());
_is_raw = false;
} }
public:
// Iteration control // Iteration control
void set_interval(int beg_bci, int end_bci) { void set_interval(int beg_bci, int end_bci) {
// iterate over the interval [beg_bci, end_bci) // iterate over the interval [beg_bci, end_bci)
@ -72,6 +74,46 @@ class RawBytecodeStream: StackObj {
set_interval(beg_bci, _method->code_size()); set_interval(beg_bci, _method->code_size());
} }
bool is_raw() const { return _is_raw; }
// Stream attributes
methodHandle method() const { return _method; }
int bci() const { return _bci; }
int next_bci() const { return _next_bci; }
int end_bci() const { return _end_bci; }
Bytecodes::Code raw_code() const { return _raw_code; }
bool is_wide() const { return _is_wide; }
int instruction_size() const { return (_next_bci - _bci); }
bool is_last_bytecode() const { return _next_bci >= _end_bci; }
address bcp() const { return method()->code_base() + _bci; }
Bytecode* bytecode() const { return Bytecode_at(bcp()); }
// State changes
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
// Bytecode-specific attributes
int dest() const { return bci() + bytecode()->get_offset_s2(raw_code()); }
int dest_w() const { return bci() + bytecode()->get_offset_s4(raw_code()); }
// One-byte indices.
int get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
protected:
void assert_raw_index_size(int size) const NOT_DEBUG_RETURN;
void assert_raw_stream(bool want_raw) const NOT_DEBUG_RETURN;
};
class RawBytecodeStream: public BaseBytecodeStream {
public:
// Construction
RawBytecodeStream(methodHandle method) : BaseBytecodeStream(method) {
_is_raw = true;
}
public:
// Iteration // Iteration
// Use raw_next() rather than next() for faster method reference // Use raw_next() rather than next() for faster method reference
Bytecodes::Code raw_next() { Bytecodes::Code raw_next() {
@ -80,7 +122,7 @@ class RawBytecodeStream: StackObj {
_bci = _next_bci; _bci = _next_bci;
assert(!is_last_bytecode(), "caller should check is_last_bytecode()"); assert(!is_last_bytecode(), "caller should check is_last_bytecode()");
address bcp = RawBytecodeStream::bcp(); address bcp = this->bcp();
code = Bytecodes::code_or_bp_at(bcp); code = Bytecodes::code_or_bp_at(bcp);
// set next bytecode position // set next bytecode position
@ -90,84 +132,49 @@ class RawBytecodeStream: StackObj {
&& code != Bytecodes::_lookupswitch, "can't be special bytecode"); && code != Bytecodes::_lookupswitch, "can't be special bytecode");
_is_wide = false; _is_wide = false;
_next_bci += l; _next_bci += l;
_code = code; _raw_code = code;
return code; return code;
} else if (code == Bytecodes::_wide && _bci + 1 >= _end_bci) {
return Bytecodes::_illegal;
} else { } else {
return raw_next_special(code); return raw_next_special(code);
} }
} }
Bytecodes::Code raw_next_special(Bytecodes::Code code); Bytecodes::Code raw_next_special(Bytecodes::Code code);
// Stream attributes // Unsigned indices, widening, with no swapping of bytes
methodHandle method() const { return _method; } int get_index() const { return (is_wide()) ? get_index_u2_raw(bcp() + 2) : get_index_u1(); }
// Get an unsigned 2-byte index, with no swapping of bytes.
int bci() const { return _bci; } int get_index_u2() const { assert(!is_wide(), ""); return get_index_u2_raw(bcp() + 1); }
int next_bci() const { return _next_bci; }
int end_bci() const { return _end_bci; }
Bytecodes::Code code() const { return _code; }
bool is_wide() const { return _is_wide; }
int instruction_size() const { return (_next_bci - _bci); }
bool is_last_bytecode() const { return _next_bci >= _end_bci; }
address bcp() const { return method()->code_base() + _bci; }
address next_bcp() { return method()->code_base() + _next_bci; }
// State changes
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
// Bytecode-specific attributes
int dest() const { return bci() + (short)Bytes::get_Java_u2(bcp() + 1); }
int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); }
// Unsigned indices, widening
int get_index() const { assert_index_size(is_wide() ? 2 : 1);
return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
int get_index_big() const { assert_index_size(2);
return (int)Bytes::get_Java_u2(bcp() + 1); }
int get_index_int() const { return has_giant_index() ? get_index_giant() : get_index_big(); }
int get_index_giant() const { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
int has_giant_index() const { return (code() == Bytecodes::_invokedynamic); }
private: private:
void assert_index_size(int required_size) const { int get_index_u2_raw(address p) const {
#ifdef ASSERT assert_raw_index_size(2); assert_raw_stream(true);
int isize = instruction_size() - (int)_is_wide - 1; return Bytes::get_Java_u2(p);
if (isize == 2 && code() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (has_giant_index())
isize = 4;
else
isize = 2;
assert(isize = required_size, "wrong index size");
#endif
} }
}; };
// In BytecodeStream, non-java bytecodes will be translated into the // In BytecodeStream, non-java bytecodes will be translated into the
// corresponding java bytecodes. // corresponding java bytecodes.
class BytecodeStream: public RawBytecodeStream { class BytecodeStream: public BaseBytecodeStream {
Bytecodes::Code _code;
public: public:
// Construction // Construction
BytecodeStream(methodHandle method) : RawBytecodeStream(method) { } BytecodeStream(methodHandle method) : BaseBytecodeStream(method) { }
// Iteration // Iteration
Bytecodes::Code next() { Bytecodes::Code next() {
Bytecodes::Code code; Bytecodes::Code raw_code, code;
// set reading position // set reading position
_bci = _next_bci; _bci = _next_bci;
if (is_last_bytecode()) { if (is_last_bytecode()) {
// indicate end of bytecode stream // indicate end of bytecode stream
code = Bytecodes::_illegal; raw_code = code = Bytecodes::_illegal;
} else { } else {
// get bytecode // get bytecode
address bcp = BytecodeStream::bcp(); address bcp = this->bcp();
code = Bytecodes::java_code_at(bcp); raw_code = Bytecodes::code_at(bcp);
code = Bytecodes::java_code(raw_code);
// set next bytecode position // set next bytecode position
// //
// note that we cannot advance before having the // note that we cannot advance before having the
@ -181,14 +188,29 @@ class BytecodeStream: public RawBytecodeStream {
_is_wide = false; _is_wide = false;
// check for special (uncommon) cases // check for special (uncommon) cases
if (code == Bytecodes::_wide) { if (code == Bytecodes::_wide) {
code = (Bytecodes::Code)bcp[1]; raw_code = (Bytecodes::Code)bcp[1];
code = raw_code; // wide BCs are always Java-normal
_is_wide = true; _is_wide = true;
} }
assert(Bytecodes::is_java_code(code), "sanity check"); assert(Bytecodes::is_java_code(code), "sanity check");
} }
_raw_code = raw_code;
_code = code; _code = code;
return _code; return _code;
} }
bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); } bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
Bytecodes::Code code() const { return _code; }
// Unsigned indices, widening
int get_index() const { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
// Get an unsigned 2-byte index, swapping the bytes if necessary.
int get_index_u2() const { assert_raw_stream(false);
return bytecode()->get_index_u2(raw_code(), false); }
// Get an unsigned 2-byte index in native order.
int get_index_u2_cpcache() const { assert_raw_stream(false);
return bytecode()->get_index_u2_cpcache(raw_code()); }
int get_index_u4() const { assert_raw_stream(false);
return bytecode()->get_index_u4(raw_code()); }
bool has_index_u4() const { return bytecode()->has_index_u4(raw_code()); }
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,7 @@ class BytecodePrinter: public BytecodeClosure {
// (Also, ensure that occasional false positives are benign.) // (Also, ensure that occasional false positives are benign.)
methodOop _current_method; methodOop _current_method;
bool _is_wide; bool _is_wide;
Bytecodes::Code _code;
address _next_pc; // current decoding position address _next_pc; // current decoding position
void align() { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); } void align() { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
@ -46,23 +47,26 @@ class BytecodePrinter: public BytecodeClosure {
short get_short() { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } short get_short() { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
int get_int() { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; } int get_int() { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
int get_index() { return *(address)_next_pc++; } int get_index_u1() { return *(address)_next_pc++; }
int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } int get_index_u2() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
int get_giant_index() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; } int get_index_u2_cpcache() { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
int get_index_special() { return (is_wide()) ? get_big_index() : get_index(); } int get_index_u4() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
int get_index_special() { return (is_wide()) ? get_index_u2() : get_index_u1(); }
methodOop method() { return _current_method; } methodOop method() { return _current_method; }
bool is_wide() { return _is_wide; } bool is_wide() { return _is_wide; }
Bytecodes::Code raw_code() { return Bytecodes::Code(_code); }
bool check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty); bool check_index(int i, int& cp_index, outputStream* st = tty);
void print_constant(int i, outputStream* st = tty); void print_constant(int i, outputStream* st = tty);
void print_field_or_method(int i, outputStream* st = tty); void print_field_or_method(int i, outputStream* st = tty);
void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty); void print_attributes(int bci, outputStream* st = tty);
void bytecode_epilog(int bci, outputStream* st = tty); void bytecode_epilog(int bci, outputStream* st = tty);
public: public:
BytecodePrinter() { BytecodePrinter() {
_is_wide = false; _is_wide = false;
_code = Bytecodes::_illegal;
} }
// This method is called while executing the raw bytecodes, so none of // This method is called while executing the raw bytecodes, so none of
@ -89,6 +93,7 @@ class BytecodePrinter: public BytecodeClosure {
} else { } else {
code = Bytecodes::code_at(bcp); code = Bytecodes::code_at(bcp);
} }
_code = code;
int bci = bcp - method->code_base(); int bci = bcp - method->code_base();
st->print("[%d] ", (int) Thread::current()->osthread()->thread_id()); st->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
if (Verbose) { if (Verbose) {
@ -99,10 +104,11 @@ class BytecodePrinter: public BytecodeClosure {
BytecodeCounter::counter_value(), bci, Bytecodes::name(code)); BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
} }
_next_pc = is_wide() ? bcp+2 : bcp+1; _next_pc = is_wide() ? bcp+2 : bcp+1;
print_attributes(code, bci); print_attributes(bci);
// Set is_wide for the next one, since the caller of this doesn't skip // Set is_wide for the next one, since the caller of this doesn't skip
// the next bytecode. // the next bytecode.
_is_wide = (code == Bytecodes::_wide); _is_wide = (code == Bytecodes::_wide);
_code = Bytecodes::_illegal;
} }
// Used for methodOop::print_codes(). The input bcp comes from // Used for methodOop::print_codes(). The input bcp comes from
@ -116,6 +122,7 @@ class BytecodePrinter: public BytecodeClosure {
if (is_wide()) { if (is_wide()) {
code = Bytecodes::code_at(bcp+1); code = Bytecodes::code_at(bcp+1);
} }
_code = code;
int bci = bcp - method->code_base(); int bci = bcp - method->code_base();
// Print bytecode index and name // Print bytecode index and name
if (is_wide()) { if (is_wide()) {
@ -124,7 +131,7 @@ class BytecodePrinter: public BytecodeClosure {
st->print("%d %s", bci, Bytecodes::name(code)); st->print("%d %s", bci, Bytecodes::name(code));
} }
_next_pc = is_wide() ? bcp+2 : bcp+1; _next_pc = is_wide() ? bcp+2 : bcp+1;
print_attributes(code, bci, st); print_attributes(bci, st);
bytecode_epilog(bci, st); bytecode_epilog(bci, st);
} }
}; };
@ -185,12 +192,13 @@ void print_oop(oop value, outputStream* st) {
} }
} }
bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) { bool BytecodePrinter::check_index(int i, int& cp_index, outputStream* st) {
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
int ilimit = constants->length(), climit = 0; int ilimit = constants->length(), climit = 0;
Bytecodes::Code code = raw_code();
constantPoolCacheOop cache = NULL; constantPoolCacheOop cache = NULL;
if (in_cp_cache) { if (Bytecodes::uses_cp_cache(code)) {
cache = constants->cache(); cache = constants->cache();
if (cache != NULL) { if (cache != NULL) {
//climit = cache->length(); // %%% private! //climit = cache->length(); // %%% private!
@ -201,7 +209,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
} }
} }
if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) { if (cache != NULL && constantPoolCacheOopDesc::is_secondary_index(i)) {
i = constantPoolCacheOopDesc::decode_secondary_index(i); i = constantPoolCacheOopDesc::decode_secondary_index(i);
st->print(" secondary cache[%d] of", i); st->print(" secondary cache[%d] of", i);
if (i >= 0 && i < climit) { if (i >= 0 && i < climit) {
@ -218,8 +226,6 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
} }
if (cache != NULL) { if (cache != NULL) {
i = Bytes::swap_u2(i);
if (WizardMode) st->print(" (swap=%d)", i);
goto check_cache_index; goto check_cache_index;
} }
@ -234,6 +240,17 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
return false; return false;
check_cache_index: check_cache_index:
#ifdef ASSERT
{
const int CPCACHE_INDEX_TAG = constantPoolOopDesc::CPCACHE_INDEX_TAG;
if (i >= CPCACHE_INDEX_TAG && i < climit + CPCACHE_INDEX_TAG) {
i -= CPCACHE_INDEX_TAG;
} else {
st->print_cr(" CP[%d] missing bias?", i);
return false;
}
}
#endif //ASSERT
if (i >= 0 && i < climit) { if (i >= 0 && i < climit) {
if (cache->entry_at(i)->is_secondary_entry()) { if (cache->entry_at(i)->is_secondary_entry()) {
st->print_cr(" secondary entry?"); st->print_cr(" secondary entry?");
@ -248,7 +265,7 @@ bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, output
void BytecodePrinter::print_constant(int i, outputStream* st) { void BytecodePrinter::print_constant(int i, outputStream* st) {
int orig_i = i; int orig_i = i;
if (!check_index(orig_i, false, i, st)) return; if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i); constantTag tag = constants->tag_at(i);
@ -279,7 +296,7 @@ void BytecodePrinter::print_constant(int i, outputStream* st) {
void BytecodePrinter::print_field_or_method(int i, outputStream* st) { void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
int orig_i = i; int orig_i = i;
if (!check_index(orig_i, true, i, st)) return; if (!check_index(orig_i, i, st)) return;
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i); constantTag tag = constants->tag_at(i);
@ -303,9 +320,9 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
} }
void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStream* st) { void BytecodePrinter::print_attributes(int bci, outputStream* st) {
// Show attributes of pre-rewritten codes // Show attributes of pre-rewritten codes
code = Bytecodes::java_code(code); Bytecodes::Code code = Bytecodes::java_code(raw_code());
// If the code doesn't have any fields there's nothing to print. // If the code doesn't have any fields there's nothing to print.
// note this is ==1 because the tableswitch and lookupswitch are // note this is ==1 because the tableswitch and lookupswitch are
// zero size (for some reason) and we want to print stuff out for them. // zero size (for some reason) and we want to print stuff out for them.
@ -323,12 +340,12 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
st->print_cr(" " INT32_FORMAT, get_short()); st->print_cr(" " INT32_FORMAT, get_short());
break; break;
case Bytecodes::_ldc: case Bytecodes::_ldc:
print_constant(get_index(), st); print_constant(get_index_u1(), st);
break; break;
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w: case Bytecodes::_ldc2_w:
print_constant(get_big_index(), st); print_constant(get_index_u2(), st);
break; break;
case Bytecodes::_iload: case Bytecodes::_iload:
@ -352,7 +369,7 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
break; break;
case Bytecodes::_newarray: { case Bytecodes::_newarray: {
BasicType atype = (BasicType)get_index(); BasicType atype = (BasicType)get_index_u1();
const char* str = type2name(atype); const char* str = type2name(atype);
if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) { if (str == NULL || atype == T_OBJECT || atype == T_ARRAY) {
assert(false, "Unidentified basic type"); assert(false, "Unidentified basic type");
@ -361,15 +378,15 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
} }
break; break;
case Bytecodes::_anewarray: { case Bytecodes::_anewarray: {
int klass_index = get_big_index(); int klass_index = get_index_u2();
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index); symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s ", name->as_C_string()); st->print_cr(" %s ", name->as_C_string());
} }
break; break;
case Bytecodes::_multianewarray: { case Bytecodes::_multianewarray: {
int klass_index = get_big_index(); int klass_index = get_index_u2();
int nof_dims = get_index(); int nof_dims = get_index_u1();
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(klass_index); symbolOop name = constants->klass_name_at(klass_index);
st->print_cr(" %s %d", name->as_C_string(), nof_dims); st->print_cr(" %s %d", name->as_C_string(), nof_dims);
@ -451,31 +468,31 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
case Bytecodes::_getstatic: case Bytecodes::_getstatic:
case Bytecodes::_putfield: case Bytecodes::_putfield:
case Bytecodes::_getfield: case Bytecodes::_getfield:
print_field_or_method(get_big_index(), st); print_field_or_method(get_index_u2_cpcache(), st);
break; break;
case Bytecodes::_invokevirtual: case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
print_field_or_method(get_big_index(), st); print_field_or_method(get_index_u2_cpcache(), st);
break; break;
case Bytecodes::_invokeinterface: case Bytecodes::_invokeinterface:
{ int i = get_big_index(); { int i = get_index_u2_cpcache();
int n = get_index(); int n = get_index_u1();
get_index(); // ignore zero byte get_byte(); // ignore zero byte
print_field_or_method(i, st); print_field_or_method(i, st);
} }
break; break;
case Bytecodes::_invokedynamic: case Bytecodes::_invokedynamic:
print_field_or_method(get_giant_index(), st); print_field_or_method(get_index_u4(), st);
break; break;
case Bytecodes::_new: case Bytecodes::_new:
case Bytecodes::_checkcast: case Bytecodes::_checkcast:
case Bytecodes::_instanceof: case Bytecodes::_instanceof:
{ int i = get_big_index(); { int i = get_index_u2();
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
symbolOop name = constants->klass_name_at(i); symbolOop name = constants->klass_name_at(i);
st->print_cr(" %d <%s>", i, name->as_C_string()); st->print_cr(" %d <%s>", i, name->as_C_string());

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,14 +37,11 @@
bool Bytecodes::_is_initialized = false; bool Bytecodes::_is_initialized = false;
const char* Bytecodes::_name [Bytecodes::number_of_codes]; const char* Bytecodes::_name [Bytecodes::number_of_codes];
const char* Bytecodes::_format [Bytecodes::number_of_codes];
const char* Bytecodes::_wide_format [Bytecodes::number_of_codes];
BasicType Bytecodes::_result_type [Bytecodes::number_of_codes]; BasicType Bytecodes::_result_type [Bytecodes::number_of_codes];
s_char Bytecodes::_depth [Bytecodes::number_of_codes]; s_char Bytecodes::_depth [Bytecodes::number_of_codes];
u_char Bytecodes::_length [Bytecodes::number_of_codes]; u_char Bytecodes::_lengths [Bytecodes::number_of_codes];
bool Bytecodes::_can_trap [Bytecodes::number_of_codes];
Bytecodes::Code Bytecodes::_java_code [Bytecodes::number_of_codes]; Bytecodes::Code Bytecodes::_java_code [Bytecodes::number_of_codes];
bool Bytecodes::_can_rewrite [Bytecodes::number_of_codes]; u_short Bytecodes::_flags [(1<<BitsPerByte)*2];
Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) { Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
@ -91,6 +88,7 @@ int Bytecodes::special_length_at(address bcp, address end) {
return (len > 0 && len == (int)len) ? len : -1; return (len > 0 && len == (int)len) ? len : -1;
} }
} }
// Note: Length functions must return <=0 for invalid bytecodes.
return 0; return 0;
} }
@ -124,15 +122,22 @@ void Bytecodes::def(Code code, const char* name, const char* format, const char*
void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) { void Bytecodes::def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code) {
assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form"); assert(wide_format == NULL || format != NULL, "short form must exist if there's a wide form");
int len = (format != NULL ? (int) strlen(format) : 0);
int wlen = (wide_format != NULL ? (int) strlen(wide_format) : 0);
_name [code] = name; _name [code] = name;
_format [code] = format;
_wide_format [code] = wide_format;
_result_type [code] = result_type; _result_type [code] = result_type;
_depth [code] = depth; _depth [code] = depth;
_can_trap [code] = can_trap; _lengths [code] = (wlen << 4) | (len & 0xF);
_length [code] = format != NULL ? (u_char)strlen(format) : 0;
_java_code [code] = java_code; _java_code [code] = java_code;
if (java_code != code) _can_rewrite[java_code] = true; int bc_flags = 0;
if (can_trap) bc_flags |= _bc_can_trap;
if (java_code != code) bc_flags |= _bc_can_rewrite;
_flags[(u1)code+0*(1<<BitsPerByte)] = compute_flags(format, bc_flags);
_flags[(u1)code+1*(1<<BitsPerByte)] = compute_flags(wide_format, bc_flags);
assert(is_defined(code) == (format != NULL), "");
assert(wide_is_defined(code) == (wide_format != NULL), "");
assert(length_for(code) == len, "");
assert(wide_length_for(code) == wlen, "");
} }
@ -140,23 +145,92 @@ void Bytecodes::def(Code code, const char* name, const char* format, const char*
// //
// b: bytecode // b: bytecode
// c: signed constant, Java byte-ordering // c: signed constant, Java byte-ordering
// i: unsigned index , Java byte-ordering // i: unsigned local index, Java byte-ordering (I = native byte ordering)
// j: unsigned index , native byte-ordering // j: unsigned CP cache index, Java byte-ordering (J = native byte ordering)
// k: unsigned CP index, Java byte-ordering
// o: branch offset, Java byte-ordering // o: branch offset, Java byte-ordering
// _: unused/ignored // _: unused/ignored
// w: wide bytecode // w: wide bytecode
// //
// Note: Right now the format strings are used for 2 purposes: // Note: The format strings are used for 2 purposes:
// 1. to specify the length of the bytecode // 1. to specify the length of the bytecode
// (= number of characters in format string) // (= number of characters in format string)
// 2. to specify the bytecode attributes // 2. to derive bytecode format flags (_fmt_has_k, etc.)
//
// The bytecode attributes are currently used only for bytecode tracing
// (see BytecodeTracer); thus if more specific format information is
// used, one would also have to adjust the bytecode tracer.
// //
// Note: For bytecodes with variable length, the format string is the empty string. // Note: For bytecodes with variable length, the format string is the empty string.
int Bytecodes::compute_flags(const char* format, int more_flags) {
if (format == NULL) return 0; // not even more_flags
int flags = more_flags;
const char* fp = format;
switch (*fp) {
case '\0':
flags |= _fmt_not_simple; // but variable
break;
case 'b':
flags |= _fmt_not_variable; // but simple
++fp; // skip 'b'
break;
case 'w':
flags |= _fmt_not_variable | _fmt_not_simple;
++fp; // skip 'w'
guarantee(*fp == 'b', "wide format must start with 'wb'");
++fp; // skip 'b'
break;
}
int has_nbo = 0, has_jbo = 0, has_size = 0;
for (;;) {
int this_flag = 0;
char fc = *fp++;
switch (fc) {
case '\0': // end of string
assert(flags == (jchar)flags, "change _format_flags");
return flags;
case '_': continue; // ignore these
case 'j': this_flag = _fmt_has_j; has_jbo = 1; break;
case 'k': this_flag = _fmt_has_k; has_jbo = 1; break;
case 'i': this_flag = _fmt_has_i; has_jbo = 1; break;
case 'c': this_flag = _fmt_has_c; has_jbo = 1; break;
case 'o': this_flag = _fmt_has_o; has_jbo = 1; break;
// uppercase versions mark native byte order (from Rewriter)
// actually, only the 'J' case happens currently
case 'J': this_flag = _fmt_has_j; has_nbo = 1; break;
case 'K': this_flag = _fmt_has_k; has_nbo = 1; break;
case 'I': this_flag = _fmt_has_i; has_nbo = 1; break;
case 'C': this_flag = _fmt_has_c; has_nbo = 1; break;
case 'O': this_flag = _fmt_has_o; has_nbo = 1; break;
default: guarantee(false, "bad char in format");
}
flags |= this_flag;
guarantee(!(has_jbo && has_nbo), "mixed byte orders in format");
if (has_nbo)
flags |= _fmt_has_nbo;
int this_size = 1;
if (*fp == fc) {
// advance beyond run of the same characters
this_size = 2;
while (*++fp == fc) this_size++;
switch (this_size) {
case 2: flags |= _fmt_has_u2; break;
case 4: flags |= _fmt_has_u4; break;
default: guarantee(false, "bad rep count in format");
}
}
guarantee(has_size == 0 || // no field yet
this_size == has_size || // same size
this_size < has_size && *fp == '\0', // last field can be short
"mixed field sizes in format");
has_size = this_size;
}
}
void Bytecodes::initialize() { void Bytecodes::initialize() {
if (_is_initialized) return; if (_is_initialized) return;
assert(number_of_codes <= 256, "too many bytecodes"); assert(number_of_codes <= 256, "too many bytecodes");
@ -191,9 +265,9 @@ void Bytecodes::initialize() {
def(_dconst_1 , "dconst_1" , "b" , NULL , T_DOUBLE , 2, false); def(_dconst_1 , "dconst_1" , "b" , NULL , T_DOUBLE , 2, false);
def(_bipush , "bipush" , "bc" , NULL , T_INT , 1, false); def(_bipush , "bipush" , "bc" , NULL , T_INT , 1, false);
def(_sipush , "sipush" , "bcc" , NULL , T_INT , 1, false); def(_sipush , "sipush" , "bcc" , NULL , T_INT , 1, false);
def(_ldc , "ldc" , "bi" , NULL , T_ILLEGAL, 1, true ); def(_ldc , "ldc" , "bk" , NULL , T_ILLEGAL, 1, true );
def(_ldc_w , "ldc_w" , "bii" , NULL , T_ILLEGAL, 1, true ); def(_ldc_w , "ldc_w" , "bkk" , NULL , T_ILLEGAL, 1, true );
def(_ldc2_w , "ldc2_w" , "bii" , NULL , T_ILLEGAL, 2, true ); def(_ldc2_w , "ldc2_w" , "bkk" , NULL , T_ILLEGAL, 2, true );
def(_iload , "iload" , "bi" , "wbii" , T_INT , 1, false); def(_iload , "iload" , "bi" , "wbii" , T_INT , 1, false);
def(_lload , "lload" , "bi" , "wbii" , T_LONG , 2, false); def(_lload , "lload" , "bi" , "wbii" , T_LONG , 2, false);
def(_fload , "fload" , "bi" , "wbii" , T_FLOAT , 1, false); def(_fload , "fload" , "bi" , "wbii" , T_FLOAT , 1, false);
@ -351,26 +425,26 @@ void Bytecodes::initialize() {
def(_dreturn , "dreturn" , "b" , NULL , T_DOUBLE , -2, true); def(_dreturn , "dreturn" , "b" , NULL , T_DOUBLE , -2, true);
def(_areturn , "areturn" , "b" , NULL , T_OBJECT , -1, true); def(_areturn , "areturn" , "b" , NULL , T_OBJECT , -1, true);
def(_return , "return" , "b" , NULL , T_VOID , 0, true); def(_return , "return" , "b" , NULL , T_VOID , 0, true);
def(_getstatic , "getstatic" , "bjj" , NULL , T_ILLEGAL, 1, true ); def(_getstatic , "getstatic" , "bJJ" , NULL , T_ILLEGAL, 1, true );
def(_putstatic , "putstatic" , "bjj" , NULL , T_ILLEGAL, -1, true ); def(_putstatic , "putstatic" , "bJJ" , NULL , T_ILLEGAL, -1, true );
def(_getfield , "getfield" , "bjj" , NULL , T_ILLEGAL, 0, true ); def(_getfield , "getfield" , "bJJ" , NULL , T_ILLEGAL, 0, true );
def(_putfield , "putfield" , "bjj" , NULL , T_ILLEGAL, -2, true ); def(_putfield , "putfield" , "bJJ" , NULL , T_ILLEGAL, -2, true );
def(_invokevirtual , "invokevirtual" , "bjj" , NULL , T_ILLEGAL, -1, true); def(_invokevirtual , "invokevirtual" , "bJJ" , NULL , T_ILLEGAL, -1, true);
def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true); def(_invokespecial , "invokespecial" , "bJJ" , NULL , T_ILLEGAL, -1, true);
def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true); def(_invokestatic , "invokestatic" , "bJJ" , NULL , T_ILLEGAL, 0, true);
def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true); def(_invokeinterface , "invokeinterface" , "bJJ__", NULL , T_ILLEGAL, -1, true);
def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, 0, true ); def(_invokedynamic , "invokedynamic" , "bJJJJ", NULL , T_ILLEGAL, 0, true );
def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true ); def(_new , "new" , "bkk" , NULL , T_OBJECT , 1, true );
def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true ); def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true );
def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true ); def(_anewarray , "anewarray" , "bkk" , NULL , T_OBJECT , 0, true );
def(_arraylength , "arraylength" , "b" , NULL , T_VOID , 0, true ); def(_arraylength , "arraylength" , "b" , NULL , T_VOID , 0, true );
def(_athrow , "athrow" , "b" , NULL , T_VOID , -1, true ); def(_athrow , "athrow" , "b" , NULL , T_VOID , -1, true );
def(_checkcast , "checkcast" , "bii" , NULL , T_OBJECT , 0, true ); def(_checkcast , "checkcast" , "bkk" , NULL , T_OBJECT , 0, true );
def(_instanceof , "instanceof" , "bii" , NULL , T_INT , 0, true ); def(_instanceof , "instanceof" , "bkk" , NULL , T_INT , 0, true );
def(_monitorenter , "monitorenter" , "b" , NULL , T_VOID , -1, true ); def(_monitorenter , "monitorenter" , "b" , NULL , T_VOID , -1, true );
def(_monitorexit , "monitorexit" , "b" , NULL , T_VOID , -1, true ); def(_monitorexit , "monitorexit" , "b" , NULL , T_VOID , -1, true );
def(_wide , "wide" , "" , NULL , T_VOID , 0, false); def(_wide , "wide" , "" , NULL , T_VOID , 0, false);
def(_multianewarray , "multianewarray" , "biic" , NULL , T_OBJECT , 1, true ); def(_multianewarray , "multianewarray" , "bkkc" , NULL , T_OBJECT , 1, true );
def(_ifnull , "ifnull" , "boo" , NULL , T_VOID , -1, false); def(_ifnull , "ifnull" , "boo" , NULL , T_VOID , -1, false);
def(_ifnonnull , "ifnonnull" , "boo" , NULL , T_VOID , -1, false); def(_ifnonnull , "ifnonnull" , "boo" , NULL , T_VOID , -1, false);
def(_goto_w , "goto_w" , "boooo", NULL , T_VOID , 0, false); def(_goto_w , "goto_w" , "boooo", NULL , T_VOID , 0, false);
@ -380,35 +454,35 @@ void Bytecodes::initialize() {
// JVM bytecodes // JVM bytecodes
// bytecode bytecode name format wide f. result tp stk traps std code // bytecode bytecode name format wide f. result tp stk traps std code
def(_fast_agetfield , "fast_agetfield" , "bjj" , NULL , T_OBJECT , 0, true , _getfield ); def(_fast_agetfield , "fast_agetfield" , "bJJ" , NULL , T_OBJECT , 0, true , _getfield );
def(_fast_bgetfield , "fast_bgetfield" , "bjj" , NULL , T_INT , 0, true , _getfield ); def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , NULL , T_INT , 0, true , _getfield );
def(_fast_cgetfield , "fast_cgetfield" , "bjj" , NULL , T_CHAR , 0, true , _getfield ); def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , NULL , T_CHAR , 0, true , _getfield );
def(_fast_dgetfield , "fast_dgetfield" , "bjj" , NULL , T_DOUBLE , 0, true , _getfield ); def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , NULL , T_DOUBLE , 0, true , _getfield );
def(_fast_fgetfield , "fast_fgetfield" , "bjj" , NULL , T_FLOAT , 0, true , _getfield ); def(_fast_fgetfield , "fast_fgetfield" , "bJJ" , NULL , T_FLOAT , 0, true , _getfield );
def(_fast_igetfield , "fast_igetfield" , "bjj" , NULL , T_INT , 0, true , _getfield ); def(_fast_igetfield , "fast_igetfield" , "bJJ" , NULL , T_INT , 0, true , _getfield );
def(_fast_lgetfield , "fast_lgetfield" , "bjj" , NULL , T_LONG , 0, true , _getfield ); def(_fast_lgetfield , "fast_lgetfield" , "bJJ" , NULL , T_LONG , 0, true , _getfield );
def(_fast_sgetfield , "fast_sgetfield" , "bjj" , NULL , T_SHORT , 0, true , _getfield ); def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , NULL , T_SHORT , 0, true , _getfield );
def(_fast_aputfield , "fast_aputfield" , "bjj" , NULL , T_OBJECT , 0, true , _putfield ); def(_fast_aputfield , "fast_aputfield" , "bJJ" , NULL , T_OBJECT , 0, true , _putfield );
def(_fast_bputfield , "fast_bputfield" , "bjj" , NULL , T_INT , 0, true , _putfield ); def(_fast_bputfield , "fast_bputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield );
def(_fast_cputfield , "fast_cputfield" , "bjj" , NULL , T_CHAR , 0, true , _putfield ); def(_fast_cputfield , "fast_cputfield" , "bJJ" , NULL , T_CHAR , 0, true , _putfield );
def(_fast_dputfield , "fast_dputfield" , "bjj" , NULL , T_DOUBLE , 0, true , _putfield ); def(_fast_dputfield , "fast_dputfield" , "bJJ" , NULL , T_DOUBLE , 0, true , _putfield );
def(_fast_fputfield , "fast_fputfield" , "bjj" , NULL , T_FLOAT , 0, true , _putfield ); def(_fast_fputfield , "fast_fputfield" , "bJJ" , NULL , T_FLOAT , 0, true , _putfield );
def(_fast_iputfield , "fast_iputfield" , "bjj" , NULL , T_INT , 0, true , _putfield ); def(_fast_iputfield , "fast_iputfield" , "bJJ" , NULL , T_INT , 0, true , _putfield );
def(_fast_lputfield , "fast_lputfield" , "bjj" , NULL , T_LONG , 0, true , _putfield ); def(_fast_lputfield , "fast_lputfield" , "bJJ" , NULL , T_LONG , 0, true , _putfield );
def(_fast_sputfield , "fast_sputfield" , "bjj" , NULL , T_SHORT , 0, true , _putfield ); def(_fast_sputfield , "fast_sputfield" , "bJJ" , NULL , T_SHORT , 0, true , _putfield );
def(_fast_aload_0 , "fast_aload_0" , "b" , NULL , T_OBJECT , 1, true , _aload_0 ); def(_fast_aload_0 , "fast_aload_0" , "b" , NULL , T_OBJECT , 1, true , _aload_0 );
def(_fast_iaccess_0 , "fast_iaccess_0" , "b_jj" , NULL , T_INT , 1, true , _aload_0 ); def(_fast_iaccess_0 , "fast_iaccess_0" , "b_JJ" , NULL , T_INT , 1, true , _aload_0 );
def(_fast_aaccess_0 , "fast_aaccess_0" , "b_jj" , NULL , T_OBJECT , 1, true , _aload_0 ); def(_fast_aaccess_0 , "fast_aaccess_0" , "b_JJ" , NULL , T_OBJECT , 1, true , _aload_0 );
def(_fast_faccess_0 , "fast_faccess_0" , "b_jj" , NULL , T_OBJECT , 1, true , _aload_0 ); def(_fast_faccess_0 , "fast_faccess_0" , "b_JJ" , NULL , T_OBJECT , 1, true , _aload_0 );
def(_fast_iload , "fast_iload" , "bi" , NULL , T_INT , 1, false, _iload); def(_fast_iload , "fast_iload" , "bi" , NULL , T_INT , 1, false, _iload);
def(_fast_iload2 , "fast_iload2" , "bi_i" , NULL , T_INT , 2, false, _iload); def(_fast_iload2 , "fast_iload2" , "bi_i" , NULL , T_INT , 2, false, _iload);
def(_fast_icaload , "fast_icaload" , "bi_" , NULL , T_INT , 0, false, _iload); def(_fast_icaload , "fast_icaload" , "bi_" , NULL , T_INT , 0, false, _iload);
// Faster method invocation. // Faster method invocation.
def(_fast_invokevfinal , "fast_invokevfinal" , "bjj" , NULL , T_ILLEGAL, -1, true, _invokevirtual ); def(_fast_invokevfinal , "fast_invokevfinal" , "bJJ" , NULL , T_ILLEGAL, -1, true, _invokevirtual );
def(_fast_linearswitch , "fast_linearswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch ); def(_fast_linearswitch , "fast_linearswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch );
def(_fast_binaryswitch , "fast_binaryswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch ); def(_fast_binaryswitch , "fast_binaryswitch" , "" , NULL , T_VOID , -1, false, _lookupswitch );

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -280,17 +280,43 @@ class Bytecodes: AllStatic {
number_of_codes number_of_codes
}; };
// Flag bits derived from format strings, can_trap, can_rewrite, etc.:
enum Flags {
// semantic flags:
_bc_can_trap = 1<<0, // bytecode execution can trap or block
_bc_can_rewrite = 1<<1, // bytecode execution has an alternate form
// format bits (determined only by the format string):
_fmt_has_c = 1<<2, // constant, such as sipush "bcc"
_fmt_has_j = 1<<3, // constant pool cache index, such as getfield "bjj"
_fmt_has_k = 1<<4, // constant pool index, such as ldc "bk"
_fmt_has_i = 1<<5, // local index, such as iload
_fmt_has_o = 1<<6, // offset, such as ifeq
_fmt_has_nbo = 1<<7, // contains native-order field(s)
_fmt_has_u2 = 1<<8, // contains double-byte field(s)
_fmt_has_u4 = 1<<9, // contains quad-byte field
_fmt_not_variable = 1<<10, // not of variable length (simple or wide)
_fmt_not_simple = 1<<11, // either wide or variable length
_all_fmt_bits = (_fmt_not_simple*2 - _fmt_has_c),
// Example derived format syndromes:
_fmt_b = _fmt_not_variable,
_fmt_bc = _fmt_b | _fmt_has_c,
_fmt_bi = _fmt_b | _fmt_has_i,
_fmt_bkk = _fmt_b | _fmt_has_k | _fmt_has_u2,
_fmt_bJJ = _fmt_b | _fmt_has_j | _fmt_has_u2 | _fmt_has_nbo,
_fmt_bo2 = _fmt_b | _fmt_has_o | _fmt_has_u2,
_fmt_bo4 = _fmt_b | _fmt_has_o | _fmt_has_u4
};
private: private:
static bool _is_initialized; static bool _is_initialized;
static const char* _name [number_of_codes]; static const char* _name [number_of_codes];
static const char* _format [number_of_codes];
static const char* _wide_format [number_of_codes];
static BasicType _result_type [number_of_codes]; static BasicType _result_type [number_of_codes];
static s_char _depth [number_of_codes]; static s_char _depth [number_of_codes];
static u_char _length [number_of_codes]; static u_char _lengths [number_of_codes];
static bool _can_trap [number_of_codes];
static Code _java_code [number_of_codes]; static Code _java_code [number_of_codes];
static bool _can_rewrite [number_of_codes]; static jchar _flags [(1<<BitsPerByte)*2]; // all second page for wide formats
static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap); static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap);
static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code); static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code);
@ -322,24 +348,20 @@ class Bytecodes: AllStatic {
static Code non_breakpoint_code_at(address bcp, methodOop method = NULL); static Code non_breakpoint_code_at(address bcp, methodOop method = NULL);
// Bytecode attributes // Bytecode attributes
static bool is_defined (int code) { return 0 <= code && code < number_of_codes && _format[code] != NULL; } static bool is_defined (int code) { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
static bool wide_is_defined(int code) { return is_defined(code) && _wide_format[code] != NULL; } static bool wide_is_defined(int code) { return is_defined(code) && flags(code, true) != 0; }
static const char* name (Code code) { check(code); return _name [code]; } static const char* name (Code code) { check(code); return _name [code]; }
static const char* format (Code code) { check(code); return _format [code]; }
static const char* wide_format (Code code) { return _wide_format[code]; }
static BasicType result_type (Code code) { check(code); return _result_type [code]; } static BasicType result_type (Code code) { check(code); return _result_type [code]; }
static int depth (Code code) { check(code); return _depth [code]; } static int depth (Code code) { check(code); return _depth [code]; }
static int length_for (Code code) { return _length[code]; } // Note: Length functions must return <=0 for invalid bytecodes.
static bool can_trap (Code code) { check(code); return _can_trap [code]; } // Calling check(code) in length functions would throw an unwanted assert.
static int length_for (Code code) { /*no check*/ return _lengths [code] & 0xF; }
static int wide_length_for(Code code) { /*no check*/ return _lengths [code] >> 4; }
static bool can_trap (Code code) { check(code); return has_all_flags(code, _bc_can_trap, false); }
static Code java_code (Code code) { check(code); return _java_code [code]; } static Code java_code (Code code) { check(code); return _java_code [code]; }
static bool can_rewrite (Code code) { check(code); return _can_rewrite [code]; } static bool can_rewrite (Code code) { check(code); return has_all_flags(code, _bc_can_rewrite, false); }
static int wide_length_for(Code code) { static bool native_byte_order(Code code) { check(code); return has_all_flags(code, _fmt_has_nbo, false); }
if (!is_defined(code)) { static bool uses_cp_cache (Code code) { check(code); return has_all_flags(code, _fmt_has_j, false); }
return 0;
}
const char* wf = wide_format(code);
return (wf == NULL) ? 0 : (int)strlen(wf);
}
// if 'end' is provided, it indicates the end of the code buffer which // if 'end' is provided, it indicates the end of the code buffer which
// should not be read past when parsing. // should not be read past when parsing.
static int special_length_at(address bcp, address end = NULL); static int special_length_at(address bcp, address end = NULL);
@ -355,6 +377,16 @@ class Bytecodes: AllStatic {
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0 static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); } || code == _fconst_0 || code == _dconst_0); }
static int compute_flags (const char* format, int more_flags = 0); // compute the flags
static int flags (int code, bool is_wide) {
assert(code == (u_char)code, "must be a byte");
return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
}
static int format_bits (Code code, bool is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
static bool has_all_flags (Code code, int test_flags, bool is_wide) {
return (flags(code, is_wide) & test_flags) == test_flags;
}
// Initialization // Initialization
static void initialize (); static void initialize ();
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -226,8 +226,9 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
// not yet been executed (in Java semantics, not in actual operation). // not yet been executed (in Java semantics, not in actual operation).
bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) { bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
address bcp = method->bcp_from(bci); address bcp = method->bcp_from(bci);
Bytecodes::Code code = Bytecodes::code_at(bcp, method());
if (!Bytecode_at(bcp)->must_rewrite()) { if (!Bytecode_at(bcp)->must_rewrite(code)) {
// might have been reached // might have been reached
return false; return false;
} }

View file

@ -63,7 +63,7 @@ void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide)) IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
// access constant pool // access constant pool
constantPoolOop pool = method(thread)->constants(); constantPoolOop pool = method(thread)->constants();
int index = wide ? two_byte_index(thread) : one_byte_index(thread); int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
constantTag tag = pool->tag_at(index); constantTag tag = pool->tag_at(index);
if (tag.is_unresolved_klass() || tag.is_klass()) { if (tag.is_unresolved_klass() || tag.is_klass()) {
@ -135,7 +135,7 @@ IRT_END
IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address)) IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
// We may want to pass in more arguments - could make this slightly faster // We may want to pass in more arguments - could make this slightly faster
constantPoolOop constants = method(thread)->constants(); constantPoolOop constants = method(thread)->constants();
int i = two_byte_index(thread); int i = get_index_u2(thread, Bytecodes::_multianewarray);
klassOop klass = constants->klass_at(i, CHECK); klassOop klass = constants->klass_at(i, CHECK);
int nof_dims = number_of_dimensions(thread); int nof_dims = number_of_dimensions(thread);
assert(oop(klass)->is_klass(), "not a class"); assert(oop(klass)->is_klass(), "not a class");
@ -169,7 +169,7 @@ IRT_END
// Quicken instance-of and check-cast bytecodes // Quicken instance-of and check-cast bytecodes
IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread)) IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
// Force resolving; quicken the bytecode // Force resolving; quicken the bytecode
int which = two_byte_index(thread); int which = get_index_u2(thread, Bytecodes::_checkcast);
constantPoolOop cpool = method(thread)->constants(); constantPoolOop cpool = method(thread)->constants();
// We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
// program we might have seen an unquick'd bytecode in the interpreter but have another // program we might have seen an unquick'd bytecode in the interpreter but have another
@ -463,7 +463,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
{ {
JvmtiHideSingleStepping jhss(thread); JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_field(info, pool, two_byte_index(thread), LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
bytecode, false, CHECK); bytecode, false, CHECK);
} // end JvmtiHideSingleStepping } // end JvmtiHideSingleStepping
@ -634,7 +634,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
{ {
JvmtiHideSingleStepping jhss(thread); JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_invoke(info, receiver, pool, LinkResolver::resolve_invoke(info, receiver, pool,
two_byte_index(thread), bytecode, CHECK); get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
if (JvmtiExport::can_hotswap_or_post_breakpoint()) { if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
int retry_count = 0; int retry_count = 0;
while (info.resolved_method()->is_old()) { while (info.resolved_method()->is_old()) {
@ -645,7 +645,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
"Could not resolve to latest version of redefined method"); "Could not resolve to latest version of redefined method");
// method is redefined in the middle of resolve so re-try. // method is redefined in the middle of resolve so re-try.
LinkResolver::resolve_invoke(info, receiver, pool, LinkResolver::resolve_invoke(info, receiver, pool,
two_byte_index(thread), bytecode, CHECK); get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
} }
} }
} // end JvmtiHideSingleStepping } // end JvmtiHideSingleStepping
@ -704,7 +704,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
caller_bci = caller_method->bci_from(caller_bcp); caller_bci = caller_method->bci_from(caller_bcp);
site_index = Bytes::get_native_u4(caller_bcp+1); site_index = Bytes::get_native_u4(caller_bcp+1);
} }
assert(site_index == four_byte_index(thread), ""); assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format"); assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
// there is a second CPC entries that is of interest; it caches signature info: // there is a second CPC entries that is of interest; it caches signature info:
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index(); int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -40,9 +40,13 @@ class InterpreterRuntime: AllStatic {
return Bytecodes::code_at(bcp(thread), method(thread)); return Bytecodes::code_at(bcp(thread), method(thread));
} }
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); } static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; } static Bytecode* bytecode(JavaThread *thread) { return Bytecode_at(bcp(thread)); }
static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); } static int get_index_u1(JavaThread *thread, Bytecodes::Code bc)
static int four_byte_index(JavaThread *thread) { return Bytes::get_native_u4(bcp(thread) + 1); } { return bytecode(thread)->get_index_u1(bc); }
static int get_index_u2(JavaThread *thread, Bytecodes::Code bc)
{ return bytecode(thread)->get_index_u2(bc); }
static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
{ return bytecode(thread)->get_index_u2_cpcache(bc); }
static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; } static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; }
static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); } static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -103,16 +103,15 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
// Rewrite a classfile-order CP index into a native-order CPC index. // Rewrite a classfile-order CP index into a native-order CPC index.
int Rewriter::rewrite_member_reference(address bcp, int offset) { void Rewriter::rewrite_member_reference(address bcp, int offset) {
address p = bcp + offset; address p = bcp + offset;
int cp_index = Bytes::get_Java_u2(p); int cp_index = Bytes::get_Java_u2(p);
int cache_index = cp_entry_to_cp_cache(cp_index); int cache_index = cp_entry_to_cp_cache(cp_index);
Bytes::put_native_u2(p, cache_index); Bytes::put_native_u2(p, cache_index);
return cp_index;
} }
void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) { void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
address p = bcp + offset; address p = bcp + offset;
assert(p[-1] == Bytecodes::_invokedynamic, ""); assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p); int cp_index = Bytes::get_Java_u2(p);
@ -178,7 +177,7 @@ void Rewriter::scan_method(methodOop method) {
case Bytecodes::_lookupswitch : { case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP #ifndef CC_INTERP
Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp); Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
bc->set_code( (*bcp) = (
bc->number_of_pairs() < BinarySwitchThreshold bc->number_of_pairs() < BinarySwitchThreshold
? Bytecodes::_fast_linearswitch ? Bytecodes::_fast_linearswitch
: Bytecodes::_fast_binaryswitch : Bytecodes::_fast_binaryswitch
@ -197,7 +196,7 @@ void Rewriter::scan_method(methodOop method) {
rewrite_member_reference(bcp, prefix_length+1); rewrite_member_reference(bcp, prefix_length+1);
break; break;
case Bytecodes::_invokedynamic: case Bytecodes::_invokedynamic:
rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME")); rewrite_invokedynamic(bcp, prefix_length+1);
break; break;
case Bytecodes::_jsr : // fall through case Bytecodes::_jsr : // fall through
case Bytecodes::_jsr_w : nof_jsrs++; break; case Bytecodes::_jsr_w : nof_jsrs++; break;
@ -308,5 +307,19 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArray
// Set up method entry points for compiler and interpreter. // Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK); m->link_method(m, CHECK);
#ifdef ASSERT
if (StressMethodComparator) {
static int nmc = 0;
for (int j = i; j >= 0 && j >= i-4; j--) {
if ((++nmc % 1000) == 0) tty->print_cr("Have run MethodComparator %d times...", nmc);
bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
if (j == i && !z) {
tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
assert(z, "method must compare equal to itself");
}
}
}
#endif //ASSERT
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -64,8 +64,8 @@ class Rewriter: public StackObj {
void scan_method(methodOop m); void scan_method(methodOop m);
methodHandle rewrite_jsrs(methodHandle m, TRAPS); methodHandle rewrite_jsrs(methodHandle m, TRAPS);
void rewrite_Object_init(methodHandle m, TRAPS); void rewrite_Object_init(methodHandle m, TRAPS);
int rewrite_member_reference(address bcp, int offset); void rewrite_member_reference(address bcp, int offset);
void rewrite_invokedynamic(address bcp, int offset, int cp_index); void rewrite_invokedynamic(address bcp, int offset);
public: public:
// Driver routine: // Driver routine:

Some files were not shown because too many files have changed in this diff Show more