mirror of https://github.com/openjdk/jdk.git
commit 315ec64dce (Merge)
129 changed files with 2111 additions and 2336 deletions
@@ -1740,7 +1740,7 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
           else if (f.isCompiledFrame()) { tty.print("compiled"); }
           else if (f.isEntryFrame())    { tty.print("entry"); }
           else if (f.isNativeFrame())   { tty.print("native"); }
-          else if (f.isGlueFrame())     { tty.print("glue"); }
+          else if (f.isRuntimeFrame())  { tty.print("runtime"); }
           else { tty.print("external"); }
           tty.print(" frame with PC = " + f.getPC() + ", SP = " + f.getSP() + ", FP = " + f.getFP());
           if (f.isSignalHandlerFrameDbg()) {
@@ -102,6 +102,11 @@ public class CodeBlob extends VMObject {
   /** On-Stack Replacement method */
   public boolean isOSRMethod() { return false; }

+  public NMethod asNMethodOrNull() {
+    if (isNMethod()) return (NMethod)this;
+    return null;
+  }
+
   // Boundaries
   public Address headerBegin() {
     return addr;

@@ -195,7 +200,7 @@ public class CodeBlob extends VMObject {
   }

   // Returns true, if the next frame is responsible for GC'ing oops passed as arguments
-  public boolean callerMustGCArguments(JavaThread thread) { return false; }
+  public boolean callerMustGCArguments() { return false; }

   public String getName() {
     return CStringUtilities.getString(nameField.getValue(addr));
@@ -59,6 +59,7 @@ public class CodeCache {
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
+    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
     virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {

@@ -126,6 +127,10 @@ public class CodeCache {
       Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)),
                   "found wrong CodeBlob");
     }
+    if (result.isRicochetBlob()) {
+      // This should probably be done for other SingletonBlobs
+      return VM.getVM().ricochetBlob();
+    }
     return result;
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -22,31 +22,37 @@
  *
  */

-package sun.jvm.hotspot.runtime.amd64;
+package sun.jvm.hotspot.code;

-import sun.jvm.hotspot.asm.amd64.*;
+import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;

-public class AMD64RegisterMap extends RegisterMap {
-
-  /** This is the only public constructor */
-  public AMD64RegisterMap(JavaThread thread, boolean updateMap) {
-    super(thread, updateMap);
+public class MethodHandlesAdapterBlob extends AdapterBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
   }

-  protected AMD64RegisterMap(RegisterMap map) {
-    super(map);
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("MethodHandlesAdapterBlob");
+
+    // FIXME: add any needed fields
   }

-  public Object clone() {
-    AMD64RegisterMap retval = new AMD64RegisterMap(this);
-    return retval;
+  public MethodHandlesAdapterBlob(Address addr) {
+    super(addr);
   }

-  // no PD state to clear or copy:
-  protected void clearPD() {}
-  protected void initializePD() {}
-  protected void initializeFromPD(RegisterMap map) {}
-  protected Address getLocationPD(VMReg reg) { return null; }
+  public boolean isMethodHandlesAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "MethodHandlesAdapterBlob: " + super.getName();
+  }
 }
@@ -46,6 +46,7 @@ public class NMethod extends CodeBlob {
   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
   private static CIntegerField deoptOffsetField;
+  private static CIntegerField deoptMhOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
   private static CIntegerField oopsOffsetField;

@@ -95,6 +96,7 @@ public class NMethod extends CodeBlob {

     exceptionOffsetField = type.getCIntegerField("_exception_offset");
     deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
+    deoptMhOffsetField = type.getCIntegerField("_deoptimize_mh_offset");
     origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField = type.getCIntegerField("_stub_offset");
     oopsOffsetField = type.getCIntegerField("_oops_offset");

@@ -139,7 +141,8 @@ public class NMethod extends CodeBlob {
   public Address instsBegin() { return codeBegin(); }
   public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
-  public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
+  public Address deoptHandlerBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
+  public Address deoptMhHandlerBegin() { return headerBegin().addOffsetTo(getDeoptMhOffset()); }
   public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }

@@ -250,6 +253,22 @@ public class NMethod extends CodeBlob {
     return (int) scavengeRootStateField.getValue(addr);
   }

+  // MethodHandle
+  public boolean isMethodHandleReturn(Address returnPc) {
+    // Hard to read a bit fields from Java and it's only there for performance
+    // so just go directly to the PCDesc
+    // if (!hasMethodHandleInvokes()) return false;
+    PCDesc pd = getPCDescAt(returnPc);
+    if (pd == null)
+      return false;
+    return pd.isMethodHandleInvoke();
+  }
+
+  // Deopt
+  // Return true is the PC is one would expect if the frame is being deopted.
+  public boolean isDeoptPc (Address pc) { return isDeoptEntry(pc) || isDeoptMhEntry(pc); }
+  public boolean isDeoptEntry (Address pc) { return pc == deoptHandlerBegin(); }
+  public boolean isDeoptMhEntry (Address pc) { return pc == deoptMhHandlerBegin(); }
+
   /** Tells whether frames described by this nmethod can be
       deoptimized. Note: native wrappers cannot be deoptimized. */

@@ -388,6 +407,7 @@ public class NMethod extends CodeBlob {
   private int getEntryBCI() { return (int) entryBCIField .getValue(addr); }
   private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
   private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); }
+  private int getDeoptMhOffset() { return (int) deoptMhOffsetField .getValue(addr); }
   private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
   private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
   private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
@@ -38,6 +38,9 @@ public class PCDesc extends VMObject {
   private static CIntegerField scopeDecodeOffsetField;
   private static CIntegerField objDecodeOffsetField;
   private static CIntegerField pcFlagsField;
+  private static int reexecuteMask;
+  private static int isMethodHandleInvokeMask;
+  private static int returnOopMask;

   static {
     VM.registerVMInitializedObserver(new Observer() {

@@ -54,6 +57,10 @@ public class PCDesc extends VMObject {
     scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
     objDecodeOffsetField = type.getCIntegerField("_obj_decode_offset");
     pcFlagsField = type.getCIntegerField("_flags");
+
+    reexecuteMask = db.lookupIntConstant("PcDesc::PCDESC_reexecute");
+    isMethodHandleInvokeMask = db.lookupIntConstant("PcDesc::PCDESC_is_method_handle_invoke");
+    returnOopMask = db.lookupIntConstant("PcDesc::PCDESC_return_oop");
   }

   public PCDesc(Address addr) {

@@ -81,7 +88,12 @@ public class PCDesc extends VMObject {

   public boolean getReexecute() {
     int flags = (int)pcFlagsField.getValue(addr);
-    return ((flags & 0x1)== 1); //first is the reexecute bit
+    return (flags & reexecuteMask) != 0;
+  }
+
+  public boolean isMethodHandleInvoke() {
+    int flags = (int)pcFlagsField.getValue(addr);
+    return (flags & isMethodHandleInvokeMask) != 0;
   }

   public void print(NMethod code) {
@@ -41,11 +41,15 @@ public class RicochetBlob extends SingletonBlob {
   }

   private static void initialize(TypeDataBase db) {
-    // Type type = db.lookupType("RicochetBlob");
+    Type type = db.lookupType("RicochetBlob");

-    // FIXME: add any needed fields
+    bounceOffsetField = type.getCIntegerField("_bounce_offset");
+    exceptionOffsetField = type.getCIntegerField("_exception_offset");
   }

+  private static CIntegerField bounceOffsetField;
+  private static CIntegerField exceptionOffsetField;
+
   public RicochetBlob(Address addr) {
     super(addr);
   }

@@ -53,4 +57,14 @@ public class RicochetBlob extends SingletonBlob {
   public boolean isRicochetBlob() {
     return true;
   }
+
+  public Address bounceAddr() {
+    return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr));
+  }
+
+  public boolean returnsToBounceAddr(Address pc) {
+    Address bouncePc = bounceAddr();
+    return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc));
+  }
+
 }
@@ -30,6 +30,8 @@ import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;

 public class RuntimeStub extends CodeBlob {
+  private static CIntegerField callerMustGCArgumentsField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
       public void update(Observable o, Object data) {

@@ -40,6 +42,7 @@ public class RuntimeStub extends CodeBlob {

   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("RuntimeStub");
+    callerMustGCArgumentsField = type.getCIntegerField("_caller_must_gc_arguments");

     // FIXME: add any needed fields
   }

@@ -52,6 +55,11 @@ public class RuntimeStub extends CodeBlob {
     return true;
   }

+  public boolean callerMustGCArguments() {
+    return callerMustGCArgumentsField.getValue(addr) != 0;
+  }
+
+
   public String getName() {
     return "RuntimeStub: " + super.getName();
   }
@@ -246,7 +246,7 @@ public class OopMapSet extends VMObject {
     }

     // Check if caller must update oop argument
-    regMap.setIncludeArgumentOops(cb.callerMustGCArguments(regMap.getThread()));
+    regMap.setIncludeArgumentOops(cb.callerMustGCArguments());

     int nofCallee = 0;
     Address[] locs = new Address[2 * REG_COUNT + 1];
@@ -90,7 +90,7 @@ public class BytecodeLoadConstant extends Bytecode {
                    jcode == Bytecodes._ldc2_w;
     if (! codeOk) return false;

-    ConstantTag ctag = method().getConstants().getTagAt(rawIndex());
+    ConstantTag ctag = method().getConstants().getTagAt(poolIndex());
     if (jcode == Bytecodes._ldc2_w) {
       // has to be double or long
       return (ctag.isDouble() || ctag.isLong()) ? true: false;
@@ -28,11 +28,13 @@ import java.io.*;

 import com.sun.jdi.*;

+import sun.jvm.hotspot.memory.SystemDictionary;
 import sun.jvm.hotspot.oops.Instance;
 import sun.jvm.hotspot.oops.InstanceKlass;
+import sun.jvm.hotspot.oops.ArrayKlass;
 import sun.jvm.hotspot.oops.JVMDIClassStatus;
 import sun.jvm.hotspot.oops.Klass;
 import sun.jvm.hotspot.oops.ObjArray;
 import sun.jvm.hotspot.oops.Oop;
 import sun.jvm.hotspot.oops.Symbol;
 import sun.jvm.hotspot.oops.DefaultHeapVisitor;

@@ -53,6 +55,7 @@ implements ReferenceType {
   private SoftReference methodsCache;
   private SoftReference allMethodsCache;
   private SoftReference nestedTypesCache;
+  private SoftReference methodInvokesCache;

   /* to mark when no info available */
   static final SDE NO_SDE_INFO_MARK = new SDE();

@@ -82,6 +85,27 @@ implements ReferenceType {
         return method;
       }
     }
+    if (ref.getMethodHolder().equals(SystemDictionary.getMethodHandleKlass())) {
+      // invoke methods are generated as needed, so make mirrors as needed
+      List mis = null;
+      if (methodInvokesCache == null) {
+        mis = new ArrayList();
+        methodInvokesCache = new SoftReference(mis);
+      } else {
+        mis = (List)methodInvokesCache.get();
+      }
+      it = mis.iterator();
+      while (it.hasNext()) {
+        MethodImpl method = (MethodImpl)it.next();
+        if (ref.equals(method.ref())) {
+          return method;
+        }
+      }
+
+      MethodImpl method = MethodImpl.createMethodImpl(vm, this, ref);
+      mis.add(method);
+      return method;
+    }
     throw new IllegalArgumentException("Invalid method id: " + ref);
   }
@@ -123,6 +123,9 @@ public class StackFrameImpl extends MirrorImpl
       Assert.that(values.size() > 0, "this is missing");
     }
     // 'this' at index 0.
+    if (values.get(0).getType() == BasicType.getTConflict()) {
+      return null;
+    }
     OopHandle handle = values.oopHandleAt(0);
     ObjectHeap heap = vm.saObjectHeap();
     thisObject = vm.objectMirror(heap.newOop(handle));

@@ -210,6 +213,8 @@ public class StackFrameImpl extends MirrorImpl
     validateStackFrame();
     StackValueCollection values = saFrame.getLocals();
     MethodImpl mmm = (MethodImpl)location.method();
+    if (mmm.isNative())
+      return null;
     List argSigs = mmm.argumentSignatures();
     int count = argSigs.size();
     List res = new ArrayList(0);

@@ -231,6 +236,38 @@ public class StackFrameImpl extends MirrorImpl
     ValueImpl valueImpl = null;
     OopHandle handle = null;
     ObjectHeap heap = vm.saObjectHeap();
+    if (values.get(ss).getType() == BasicType.getTConflict()) {
+      // Dead locals, so just represent them as a zero of the appropriate type
+      if (variableType == BasicType.T_BOOLEAN) {
+        valueImpl = (BooleanValueImpl) vm.mirrorOf(false);
+      } else if (variableType == BasicType.T_CHAR) {
+        valueImpl = (CharValueImpl) vm.mirrorOf((char)0);
+      } else if (variableType == BasicType.T_FLOAT) {
+        valueImpl = (FloatValueImpl) vm.mirrorOf((float)0);
+      } else if (variableType == BasicType.T_DOUBLE) {
+        valueImpl = (DoubleValueImpl) vm.mirrorOf((double)0);
+      } else if (variableType == BasicType.T_BYTE) {
+        valueImpl = (ByteValueImpl) vm.mirrorOf((byte)0);
+      } else if (variableType == BasicType.T_SHORT) {
+        valueImpl = (ShortValueImpl) vm.mirrorOf((short)0);
+      } else if (variableType == BasicType.T_INT) {
+        valueImpl = (IntegerValueImpl) vm.mirrorOf((int)0);
+      } else if (variableType == BasicType.T_LONG) {
+        valueImpl = (LongValueImpl) vm.mirrorOf((long)0);
+      } else if (variableType == BasicType.T_OBJECT) {
+        // we may have an [Ljava/lang/Object; - i.e., Object[] with the
+        // elements themselves may be arrays because every array is an Object.
+        handle = null;
+        valueImpl = (ObjectReferenceImpl) vm.objectMirror(heap.newOop(handle));
+      } else if (variableType == BasicType.T_ARRAY) {
+        handle = null;
+        valueImpl = vm.arrayMirror((Array)heap.newOop(handle));
+      } else if (variableType == BasicType.T_VOID) {
+        valueImpl = new VoidValueImpl(vm);
+      } else {
+        throw new RuntimeException("Should not read here");
+      }
+    } else {
     if (variableType == BasicType.T_BOOLEAN) {
       valueImpl = (BooleanValueImpl) vm.mirrorOf(values.booleanAt(ss));
     } else if (variableType == BasicType.T_CHAR) {

@@ -260,6 +297,7 @@ public class StackFrameImpl extends MirrorImpl
     } else {
       throw new RuntimeException("Should not read here");
     }
+    }

     return valueImpl;
   }
@@ -44,6 +44,7 @@ public class SystemDictionary {
   private static sun.jvm.hotspot.types.OopField systemKlassField;
   private static sun.jvm.hotspot.types.OopField threadKlassField;
   private static sun.jvm.hotspot.types.OopField threadGroupKlassField;
+  private static sun.jvm.hotspot.types.OopField methodHandleKlassField;

   static {
     VM.registerVMInitializedObserver(new Observer() {

@@ -69,6 +70,7 @@ public class SystemDictionary {
     systemKlassField = type.getOopField(WK_KLASS("System_klass"));
     threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
     threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
+    methodHandleKlassField = type.getOopField(WK_KLASS("MethodHandle_klass"));
   }

   // This WK functions must follow the definitions in systemDictionary.hpp:

@@ -127,6 +129,10 @@ public class SystemDictionary {
     return (InstanceKlass) newOop(systemKlassField.getValue());
   }

+  public static InstanceKlass getMethodHandleKlass() {
+    return (InstanceKlass) newOop(methodHandleKlassField.getValue());
+  }
+
   public InstanceKlass getAbstractOwnableSynchronizerKlass() {
     return (InstanceKlass) find("java/util/concurrent/locks/AbstractOwnableSynchronizer",
                                 null, null);
@@ -93,6 +93,8 @@ public class CompiledVFrame extends JavaVFrame {
   }

   public StackValueCollection getLocals() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getLocals();
     if (scvList == null)
       return new StackValueCollection();

@@ -108,6 +110,8 @@ public class CompiledVFrame extends JavaVFrame {
   }

   public StackValueCollection getExpressions() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getExpressions();
     if (scvList == null)
       return new StackValueCollection();
@@ -33,6 +33,7 @@ import sun.jvm.hotspot.c1.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.sparc.SPARCFrame;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;


@@ -74,11 +75,19 @@ public abstract class Frame implements Cloneable {
   /** Size of constMethodOopDesc for computing BCI from BCP (FIXME: hack) */
   private static long constMethodOopDescSize;

+  private static int pcReturnOffset;
+
+  public static int pcReturnOffset() {
+    return pcReturnOffset;
+  }
+
   private static synchronized void initialize(TypeDataBase db) {
     Type constMethodOopType = db.lookupType("constMethodOopDesc");
     // FIXME: not sure whether alignment here is correct or how to
     // force it (round up to address size?)
     constMethodOopDescSize = constMethodOopType.getSize();
+
+    pcReturnOffset = db.lookupIntConstant("frame::pc_return_offset").intValue();
   }

   protected int bcpToBci(Address bcp, ConstMethod cm) {

@@ -106,6 +115,10 @@ public abstract class Frame implements Cloneable {
   public void setPC(Address newpc) { pc = newpc; }
   public boolean isDeoptimized() { return deoptimized; }

+  public CodeBlob cb() {
+    return VM.getVM().getCodeCache().findBlob(getPC());
+  }
+
   public abstract Address getSP();
   public abstract Address getID();
   public abstract Address getFP();

@@ -134,6 +147,12 @@ public abstract class Frame implements Cloneable {
     }
   }

+  public boolean isRicochetFrame() {
+    CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC());
+    RicochetBlob rcb = VM.getVM().ricochetBlob();
+    return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC()));
+  }
+
   public boolean isCompiledFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");

@@ -142,7 +161,7 @@ public abstract class Frame implements Cloneable {
     return (cb != null && cb.isJavaMethod());
   }

-  public boolean isGlueFrame() {
+  public boolean isRuntimeFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
     }

@@ -197,7 +216,8 @@ public abstract class Frame implements Cloneable {
   public Frame realSender(RegisterMap map) {
     if (!VM.getVM().isCore()) {
       Frame result = sender(map);
-      while (result.isGlueFrame()) {
+      while (result.isRuntimeFrame() ||
+             result.isRicochetFrame()) {
         result = result.sender(map);
       }
       return result;

@@ -611,6 +631,9 @@ public abstract class Frame implements Cloneable {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(cb != null, "sanity check");
     }
+    if (cb == VM.getVM().ricochetBlob()) {
+      oopsRicochetDo(oopVisitor, regMap);
+    }
     if (cb.getOopMaps() != null) {
       OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging());


@@ -627,6 +650,10 @@ public abstract class Frame implements Cloneable {
     // }
   }

+  private void oopsRicochetDo (AddressVisitor oopVisitor, RegisterMap regMap) {
+    // XXX Empty for now
+  }
+
   // FIXME: implement the above routines, plus add
   // oops_interpreted_arguments_do and oops_compiled_arguments_do
 }
@@ -128,14 +128,14 @@ public abstract class JavaVFrame extends VFrame {
     }

     // dynamic part - we just compare the frame pointer
-    if (! getFrame().getFP().equals(other.getFrame().getFP())) {
+    if (! getFrame().equals(other.getFrame())) {
       return false;
     }
     return true;
   }

   public int hashCode() {
-    return getMethod().hashCode() ^ getBCI() ^ getFrame().getFP().hashCode();
+    return getMethod().hashCode() ^ getBCI() ^ getFrame().hashCode();
   }

   /** Structural compare */
@@ -100,7 +100,7 @@ public class StackValue {

   public int hashCode() {
     if (type == BasicType.getTObject()) {
-      return handleValue.hashCode();
+      return handleValue != null ? handleValue.hashCode() : 5;
     } else {
       // Returns 0 for conflict type
       return (int) integerValue;
@@ -77,7 +77,7 @@ public class VFrame {
       return new CompiledVFrame(f, regMap, thread, scope, mayBeImprecise);
     }

-    if (f.isGlueFrame()) {
+    if (f.isRuntimeFrame()) {
       // This is a conversion frame. Skip this frame and try again.
       RegisterMap tempMap = regMap.copy();
       Frame s = f.sender(tempMap);
@@ -30,6 +30,7 @@ import java.util.*;
 import java.util.regex.*;
+import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.c1.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;

@@ -85,6 +86,9 @@ public class VM {
   private Interpreter interpreter;
   private StubRoutines stubRoutines;
   private Bytes bytes;
+
+  private RicochetBlob ricochetBlob;
+
   /** Flags indicating whether we are attached to a core, C1, or C2 build */
   private boolean usingClientCompiler;
   private boolean usingServerCompiler;

@@ -618,6 +622,18 @@ public class VM {
     return stubRoutines;
   }

+  public RicochetBlob ricochetBlob() {
+    if (ricochetBlob == null) {
+      Type ricochetType = db.lookupType("SharedRuntime");
+      AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob");
+      Address addr = ricochetBlobAddress.getValue();
+      if (addr != null) {
+        ricochetBlob = new RicochetBlob(addr);
+      }
+    }
+    return ricochetBlob;
+  }
+
   public VMRegImpl getVMRegImplInfo() {
     if (vmregImpl == null) {
       vmregImpl = new VMRegImpl();
@@ -29,6 +29,7 @@ import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.runtime.x86.*;

 /** <P> Should be able to be used on all amd64 platforms we support
     (Linux/amd64) to implement JavaThread's

@@ -123,7 +124,7 @@ public class AMD64CurrentFrameGuess {
          offset += vm.getAddressSize()) {
       try {
         Address curSP = sp.addOffsetTo(offset);
-        Frame frame = new AMD64Frame(curSP, null, pc);
+        Frame frame = new X86Frame(curSP, null, pc);
         RegisterMap map = thread.newRegisterMap(false);
         while (frame != null) {
           if (frame.isEntryFrame() && frame.entryFrameIsFirst()) {
@@ -1,528 +0,0 @@
-/*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.amd64;
-
-import java.util.*;
-import sun.jvm.hotspot.code.*;
-import sun.jvm.hotspot.compiler.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-/** Specialization of and implementation of abstract methods of the
-    Frame class for the amd64 CPU. */
-
-public class AMD64Frame extends Frame {
-  private static final boolean DEBUG;
-  static {
-    DEBUG = System.getProperty("sun.jvm.hotspot.runtime.amd64.AMD64Frame.DEBUG") != null;
-  }
-
-  // refer to frame_amd64.hpp
-  private static final int PC_RETURN_OFFSET = 0;
-  // All frames
-  private static final int LINK_OFFSET = 0;
-  private static final int RETURN_ADDR_OFFSET = 1;
-  private static final int SENDER_SP_OFFSET = 2;
-
-  // Interpreter frames
-  private static final int INTERPRETER_FRAME_MIRROR_OFFSET = 2; // for native calls only
-  private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1;
-  private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1;
-  private static final int INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1;
-  private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only
-  private static int INTERPRETER_FRAME_CACHE_OFFSET;
-  private static int INTERPRETER_FRAME_LOCALS_OFFSET;
-  private static int INTERPRETER_FRAME_BCX_OFFSET;
-  private static int INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-  private static int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET;
-  private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
-
-  // Entry frames
-  private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -6;
-
-  // Native frames
-  private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    if (VM.getVM().isCore()) {
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-    } else {
-      INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
-      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
-    }
-    INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1;
-    INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
-    INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1;
-    INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-    INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
-  }
-
-  // an additional field beyond sp and pc:
-  Address raw_fp; // frame pointer
-  private Address raw_unextendedSP;
-
-  private AMD64Frame() {
-  }
-
-  private void adjustForDeopt() {
-    if ( pc != null) {
-      // Look for a deopt pc and if it is deopted convert to original pc
-      CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
-      if (cb != null && cb.isJavaMethod()) {
-        NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
-          // adjust pc if frame is deoptimized.
-          if (Assert.ASSERTS_ENABLED) {
-            Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
-          }
-          pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
-          deoptimized = true;
-        }
-      }
-    }
-  }
-
-  public AMD64Frame(Address raw_sp, Address raw_fp, Address pc) {
-    this.raw_sp = raw_sp;
-    this.raw_unextendedSP = raw_sp;
-    this.raw_fp = raw_fp;
-    this.pc = pc;
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp, pc): " + this);
-      dumpStack();
-    }
-  }
-
-  public AMD64Frame(Address raw_sp, Address raw_fp) {
-    this.raw_sp = raw_sp;
-    this.raw_unextendedSP = raw_sp;
-    this.raw_fp = raw_fp;
-    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp): " + this);
-      dumpStack();
-    }
-  }
-
-  // This constructor should really take the unextended SP as an arg
-  // but then the constructor is ambiguous with constructor that takes
-  // a PC so take an int and convert it.
-  public AMD64Frame(Address raw_sp, Address raw_fp, long extension) {
-    this.raw_sp = raw_sp;
-    if ( raw_sp == null) {
-      this.raw_unextendedSP = null;
-    } else {
-      this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
-    }
-    this.raw_fp = raw_fp;
-    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    // Frame must be fully constructed before this call
-    adjustForDeopt();
-
-    if (DEBUG) {
-      System.out.println("AMD64Frame(sp, fp, extension): " + this);
-      dumpStack();
-    }
-
-  }
-
-  public Object clone() {
-    AMD64Frame frame = new AMD64Frame();
-    frame.raw_sp = raw_sp;
-    frame.raw_unextendedSP = raw_unextendedSP;
-    frame.raw_fp = raw_fp;
-    frame.pc = pc;
-    frame.deoptimized = deoptimized;
-    return frame;
-  }
-
-  public boolean equals(Object arg) {
-    if (arg == null) {
-      return false;
-    }
-
-    if (!(arg instanceof AMD64Frame)) {
-      return false;
-    }
-
-    AMD64Frame other = (AMD64Frame) arg;
-
-    return (AddressOps.equal(getSP(), other.getSP()) &&
-            AddressOps.equal(getFP(), other.getFP()) &&
-            AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) &&
-            AddressOps.equal(getPC(), other.getPC()));
-  }
-
-  public int hashCode() {
-    if (raw_sp == null) {
-      return 0;
-    }
-
-    return raw_sp.hashCode();
-  }
-
-  public String toString() {
-    return "sp: " + (getSP() == null? "null" : getSP().toString()) +
-         ", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) +
-         ", fp: " + (getFP() == null? "null" : getFP().toString()) +
-         ", pc: " + (pc == null? "null" : pc.toString());
-  }
-
-  // accessors for the instance variables
-  public Address getFP() { return raw_fp; }
-  public Address getSP() { return raw_sp; }
-  public Address getID() { return raw_sp; }
-
-  // FIXME: not implemented yet (should be done for Solaris/AMD64)
-  public boolean isSignalHandlerFrameDbg() { return false; }
-  public int getSignalNumberDbg() { return 0; }
-  public String getSignalNameDbg() { return null; }
-
-  public boolean isInterpretedFrameValid() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isInterpretedFrame(), "Not an interpreted frame");
-    }
-
-    // These are reasonable sanity checks
-    if (getFP() == null || getFP().andWithMask(0x3) != null) {
-      return false;
-    }
-
-    if (getSP() == null || getSP().andWithMask(0x3) != null) {
-      return false;
-    }
-
-    if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) {
-      return false;
-    }
-
-    // These are hacks to keep us out of trouble.
-    // The problem with these is that they mask other problems
-    if (getFP().lessThanOrEqual(getSP())) {
-      // this attempts to deal with unsigned comparison above
-      return false;
-    }
-
-    if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) {
-      // stack frames shouldn't be large.
-      return false;
-    }
-
-    return true;
-  }
-
-  // FIXME: not applicable in current system
-  // void patch_pc(Thread* thread, address pc);
-
-  public Frame sender(RegisterMap regMap, CodeBlob cb) {
-    AMD64RegisterMap map = (AMD64RegisterMap) regMap;
-
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-
-    // Default is we done have to follow them. The sender_for_xxx will
-    // update it accordingly
-    map.setIncludeArgumentOops(false);
-
-    if (isEntryFrame()) return senderForEntryFrame(map);
-    if (isInterpretedFrame()) return senderForInterpreterFrame(map);
-
-
-    if (!VM.getVM().isCore()) {
-      if(cb == null) {
-        cb = VM.getVM().getCodeCache().findBlob(getPC());
-      } else {
-        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
-        }
-      }
-
-      if (cb != null) {
-        return senderForCompiledFrame(map, cb);
-      }
-    }
-
-    // Must be native-compiled frame, i.e. the marshaling code for native
-    // methods that exists in the core system.
-    return new AMD64Frame(getSenderSP(), getLink(), getSenderPC());
-  }
-
-  private Frame senderForEntryFrame(AMD64RegisterMap map) {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-    // Java frame called from C; skip all C frames and return top C
-    // frame of that chunk as the sender
-    AMD64JavaCallWrapper jcw = (AMD64JavaCallWrapper) getEntryFrameCallWrapper();
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero");
-      Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack");
-    }
-    AMD64Frame fr;
-    if (jcw.getLastJavaPC() != null) {
-      fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC());
-    } else {
-      fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP());
-    }
-    map.clear();
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
-    }
-    return fr;
-  }
-
-  private Frame senderForInterpreterFrame(AMD64RegisterMap map) {
-    Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
-    Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
-    // We do not need to update the callee-save register mapping because above
-    // us is either another interpreter frame or a converter-frame, but never
-    // directly a compiled frame.
-    // 11/24/04 SFG. This is no longer true after adapter were removed. However at the moment
-    // C2 no longer uses callee save register for java calls so there are no callee register
-    // to find.
-    return new AMD64Frame(sp, getLink(), unextendedSP.minus(sp));
-  }
-
-  private Frame senderForCompiledFrame(AMD64RegisterMap map, CodeBlob cb) {
-    //
-    // NOTE: some of this code is (unfortunately) duplicated in AMD64CurrentFrameGuess
-    //
-
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(map != null, "map must be set");
-    }
-
-    // frame owned by optimizing compiler
-    Address sender_sp = null;
-
-
-    if (VM.getVM().isClientCompiler()) {
-      sender_sp = addressOfStackSlot(SENDER_SP_OFFSET);
-    } else {
-      if (Assert.ASSERTS_ENABLED) {
-        Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
-      }
-      sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
-    }
-
-    // On Intel the return_address is always the word on the stack
-    Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
-
-    if (map.getUpdateMap() && cb.getOopMaps() != null) {
-      OopMapSet.updateRegisterMap(this, cb, map, true);
-    }
-
-    if (VM.getVM().isClientCompiler()) {
-      // Move this here for C1 and collecting oops in arguments (According to Rene)
-      map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
-    }
-
-    Address saved_fp = null;
-    if (VM.getVM().isClientCompiler()) {
-      saved_fp = getFP().getAddressAt(0);
-    } else if (VM.getVM().isServerCompiler() &&
-               (VM.getVM().getInterpreter().contains(sender_pc) ||
-                VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
-      // C2 prologue saves EBP in the usual place.
-      // however only use it if the sender had link infomration in it.
-      saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
-    }
-
-    return new AMD64Frame(sender_sp, saved_fp, sender_pc);
-  }
-
-  protected boolean hasSenderPD() {
-    // FIXME
-    // Check for null ebp? Need to do some tests.
-    return true;
-  }
-
-  public long frameSize() {
-    return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize());
-  }
-
-  public Address getLink() {
-    return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
-  }
-
-  // FIXME: not implementable yet
-  //inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }
-
-  public Address getUnextendedSP() { return raw_unextendedSP; }
-
-  // Return address:
-  public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
-  public Address getSenderPC() { return getSenderPCAddr().getAddressAt(0); }
-
-  // return address of param, zero origin index.
-  public Address getNativeParamAddr(int idx) {
-    return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
-  }
-
-  public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }
-
-  public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
-    if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
-      throw new RuntimeException("Should not reach here");
-    }
-
-    return oopMapRegToLocation(reg, regMap);
-  }
-
-  public Address addressOfInterpreterFrameLocals() {
-    return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
-  }
-
-  private Address addressOfInterpreterFrameBCX() {
-    return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET);
-  }
-
-  public int getInterpreterFrameBCI() {
-    // FIXME: this is not atomic with respect to GC and is unsuitable
-    // for use in a non-debugging, or reflective, system. Need to
-    // figure out how to express this.
-    Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0);
-    OopHandle methodHandle = addressOfInterpreterFrameMethod().getOopHandleAt(0);
-    Method method = (Method) VM.getVM().getObjectHeap().newOop(methodHandle);
-    return (int) bcpToBci(bcp, method);
-  }
-
-  public Address addressOfInterpreterFrameMDX() {
-    return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET);
-  }
-
-  // FIXME
-  //inline int frame::interpreter_frame_monitor_size() {
-  //  return BasicObjectLock::size();
-  //}
-
-  // expression stack
-  // (the max_stack arguments are used by the GC; see class FrameClosure)
-
-  public Address addressOfInterpreterFrameExpressionStack() {
-    Address monitorEnd = interpreterFrameMonitorEnd().address();
-    return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize());
-  }
-
-  public int getInterpreterFrameExpressionStackDirection() { return -1; }
-
-  // top of expression stack
-  public Address addressOfInterpreterFrameTOS() {
-    return getSP();
-  }
-
-  /** Expression stack from top down */
-  public Address addressOfInterpreterFrameTOSAt(int slot) {
-    return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize());
-  }
-
-  public Address getInterpreterFrameSenderSP() {
-    if (Assert.ASSERTS_ENABLED) {
-      Assert.that(isInterpretedFrame(), "interpreted frame expected");
-    }
-    return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
-  }
-
-  // Monitors
-  public BasicObjectLock interpreterFrameMonitorBegin() {
-    return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET));
-  }
-
-  public BasicObjectLock interpreterFrameMonitorEnd() {
-    Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0);
-    if (Assert.ASSERTS_ENABLED) {
-      // make sure the pointer points inside the frame
-      Assert.that(AddressOps.gt(getFP(), result), "result must < than frame pointer");
-      Assert.that(AddressOps.lte(getSP(), result), "result must >= than stack pointer");
-    }
-    return new BasicObjectLock(result);
-  }
-
-  public int interpreterFrameMonitorSize() {
-    return BasicObjectLock.size();
-  }
-
-  // Method
-  public Address addressOfInterpreterFrameMethod() {
-    return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET);
-  }
-
-  // Constant pool cache
-  public Address addressOfInterpreterFrameCPCache() {
-    return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET);
-  }
-
-  // Entry frames
-  public JavaCallWrapper getEntryFrameCallWrapper() {
-    return new AMD64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0));
-  }
-
-  protected Address addressOfSavedOopResult() {
-    // offset is 2 for compiler2 and 3 for compiler1
-    return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) *
-                               VM.getVM().getAddressSize());
-  }
-
-  protected Address addressOfSavedReceiver() {
-    return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
-  }
-
-  private void dumpStack() {
-    if (getFP() != null) {
-      for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
-           AddressOps.lte(addr, getFP().addOffsetTo(5 * VM.getVM().getAddressSize()));
-           addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
-        System.out.println(addr + ": " + addr.getAddressAt(0));
-      }
-    } else {
-      for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
-           AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize()));
-           addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
-        System.out.println(addr + ": " + addr.getAddressAt(0));
-      }
-    }
-  }
-}
@@ -30,6 +30,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;


@@ -80,11 +81,11 @@ public class LinuxAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
     if (fp == null) {
       return null; // no information
     }
-    return new AMD64Frame(thread.getLastJavaSP(), fp);
+    return new X86Frame(thread.getLastJavaSP(), fp);
   }

   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }

   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {

@@ -95,9 +96,9 @@ public class LinuxAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }
@@ -30,6 +30,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;


@@ -84,14 +85,14 @@ public class SolarisAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
     }
     Address pc = thread.getLastJavaPC();
     if ( pc != null ) {
-      return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
+      return new X86Frame(thread.getLastJavaSP(), fp, pc);
     } else {
-      return new AMD64Frame(thread.getLastJavaSP(), fp);
+      return new X86Frame(thread.getLastJavaSP(), fp);
     }
   }

   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }

   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {

@@ -102,9 +103,9 @@ public class SolarisAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }
@@ -236,7 +236,7 @@ public class SPARCFrame extends Frame {
       CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
       if (cb != null && cb.isJavaMethod()) {
         NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
+        if (pc.equals(nm.deoptHandlerBegin())) {
           // adjust pc if frame is deoptimized.
           pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
           deoptimized = true;

@@ -559,7 +559,6 @@ public class SPARCFrame extends Frame {
       }
     }

-    if (!VM.getVM().isCore()) {
       // Note: The version of this operation on any platform with callee-save
       // registers must update the register map (if not null).
       // In order to do this correctly, the various subtypes of

@@ -572,6 +571,7 @@ public class SPARCFrame extends Frame {
       // registers callee-saved, then we will have to copy over
      // the RegisterMap update logic from the Intel code.

+      if (isRicochetFrame()) return senderForRicochetFrame(map);

      // The constructor of the sender must know whether this frame is interpreted so it can set the
      // sender's _interpreter_sp_adjustment field.

@@ -584,24 +584,21 @@ public class SPARCFrame extends Frame {
      // supplied blob which is already known to be associated with this frame.
      cb = VM.getVM().getCodeCache().findBlob(pc);
      if (cb != null) {
-
-       if (cb.callerMustGCArguments(map.getThread())) {
-         map.setIncludeArgumentOops(true);
-       }
-
        // Update the location of all implicitly saved registers
        // as the address of these registers in the register save
        // area (for %o registers we use the address of the %i
        // register in the next younger frame)
        map.shiftWindow(sp, youngerSP);
+       if (map.getUpdateMap()) {
+         if (cb.callerMustGCArguments()) {
+           map.setIncludeArgumentOops(true);
+         }
+         if (cb.getOopMaps() != null) {
+           OopMapSet.updateRegisterMap(this, cb, map, VM.getVM().isDebugging());
+         }
+       }
      }
    }
  } // #ifndef CORE

  return new SPARCFrame(biasSP(sp), biasSP(youngerSP), isInterpreted);
 }

@@ -948,6 +945,20 @@ public class SPARCFrame extends Frame {
   }


+  private Frame senderForRicochetFrame(SPARCRegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForRicochetFrame");
+    }
+    //RicochetFrame* f = RicochetFrame::from_frame(fr);
+    // Cf. is_interpreted_frame path of frame::sender
+    Address youngerSP = getSP();
+    Address sp = getSenderSP();
+    map.makeIntegerRegsUnsaved();
+    map.shiftWindow(sp, youngerSP);
+    boolean thisFrameAdjustedStack = true; // I5_savedSP is live in this RF
+    return new SPARCFrame(sp, youngerSP, thisFrameAdjustedStack);
+  }
+
   private Frame senderForEntryFrame(RegisterMap regMap) {
     SPARCRegisterMap map = (SPARCRegisterMap) regMap;


@@ -965,10 +976,8 @@ public class SPARCFrame extends Frame {
     Address lastJavaPC = jcw.getLastJavaPC();
     map.clear();

-    if (!VM.getVM().isCore()) {
-      map.makeIntegerRegsUnsaved();
-      map.shiftWindow(lastJavaSP, null);
-    }
+    map.makeIntegerRegsUnsaved();
+    map.shiftWindow(lastJavaSP, null);

     if (Assert.ASSERTS_ENABLED) {
       Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime.sparc;
+
+import java.util.*;
+import sun.jvm.hotspot.asm.sparc.SPARCRegister;
+import sun.jvm.hotspot.asm.sparc.SPARCRegisters;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class SPARCRicochetFrame {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private SPARCFrame frame;
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("MethodHandles::RicochetFrame");
+
+  }
+
+  static SPARCRicochetFrame fromFrame(SPARCFrame f) {
+    return new SPARCRicochetFrame(f);
+  }
+
+  private SPARCRicochetFrame(SPARCFrame f) {
+    frame = f;
+  }
+
+  private Address registerValue(SPARCRegister reg) {
+    return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0);
+  }
+
+  public Address savedArgsBase() {
+    return registerValue(SPARCRegisters.L4);
+  }
+  public Address exactSenderSP() {
+    return registerValue(SPARCRegisters.I5);
+  }
+  public Address senderLink() {
+    return frame.getSenderSP();
+  }
+  public Address senderPC() {
+    return frame.getSenderPC();
+  }
+  public Address extendedSenderSP() {
+    return savedArgsBase();
+  }
+}
@@ -31,6 +31,7 @@ import sun.jvm.hotspot.debugger.win32.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;


@@ -86,14 +87,14 @@ public class Win32AMD64JavaThreadPDAccess implements JavaThreadPDAccess {
     }
     Address pc = thread.getLastJavaPC();
     if ( pc != null ) {
-      return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
+      return new X86Frame(thread.getLastJavaSP(), fp, pc);
     } else {
-      return new AMD64Frame(thread.getLastJavaSP(), fp);
+      return new X86Frame(thread.getLastJavaSP(), fp);
     }
   }

   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }

   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {

@@ -104,9 +105,9 @@ public class Win32AMD64JavaThreadPDAccess implements JavaThreadPDAccess {
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }
@@ -25,7 +25,6 @@
package sun.jvm.hotspot.runtime.x86;

import java.util.*;
import sun.jvm.hotspot.asm.x86.*;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.compiler.*;
import sun.jvm.hotspot.debugger.*;

@@ -62,11 +61,13 @@ public class X86Frame extends Frame {
  private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;

  // Entry frames
  private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET = 2;
  private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET;

  // Native frames
  private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;

  private static VMReg rbp;

  static {
    VM.registerVMInitializedObserver(new Observer() {
      public void update(Observable o, Object data) {

@@ -76,18 +77,22 @@ public class X86Frame extends Frame {
  }

  private static synchronized void initialize(TypeDataBase db) {
    if (VM.getVM().isCore()) {
      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
    } else {
      INTERPRETER_FRAME_MDX_OFFSET   = INTERPRETER_FRAME_METHOD_OFFSET - 1;
      INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
    }
    INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1;
    INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
    INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1;
    INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
    INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;

    ENTRY_FRAME_CALL_WRAPPER_OFFSET = db.lookupIntConstant("frame::entry_frame_call_wrapper_offset");
    if (VM.getVM().getAddressSize() == 4) {
      rbp = new VMReg(5);
    } else {
      rbp = new VMReg(5 << 1);
    }
  }


  // an additional field beyond sp and pc:
  Address raw_fp; // frame pointer

@@ -102,7 +107,7 @@ public class X86Frame extends Frame {
    CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
    if (cb != null && cb.isJavaMethod()) {
      NMethod nm = (NMethod) cb;
      if (pc.equals(nm.deoptBegin())) {
      if (pc.equals(nm.deoptHandlerBegin())) {
        if (Assert.ASSERTS_ENABLED) {
          Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
        }

@@ -119,6 +124,7 @@ public class X86Frame extends Frame {
    this.raw_unextendedSP = raw_sp;
    this.raw_fp = raw_fp;
    this.pc = pc;
    adjustUnextendedSP();

    // Frame must be fully constructed before this call
    adjustForDeopt();

@@ -134,6 +140,7 @@ public class X86Frame extends Frame {
    this.raw_unextendedSP = raw_sp;
    this.raw_fp = raw_fp;
    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
    adjustUnextendedSP();

    // Frame must be fully constructed before this call
    adjustForDeopt();

@@ -144,24 +151,18 @@ public class X86Frame extends Frame {
    }
  }

  // This constructor should really take the unextended SP as an arg
  // but then the constructor is ambiguous with constructor that takes
  // a PC so take an int and convert it.
  public X86Frame(Address raw_sp, Address raw_fp, long extension) {
  public X86Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) {
    this.raw_sp = raw_sp;
    if (raw_sp == null) {
      this.raw_unextendedSP = null;
    } else {
      this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
    }
    this.raw_unextendedSP = raw_unextendedSp;
    this.raw_fp = raw_fp;
    this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
    this.pc = pc;
    adjustUnextendedSP();

    // Frame must be fully constructed before this call
    adjustForDeopt();

    if (DEBUG) {
      System.out.println("X86Frame(sp, fp): " + this);
      System.out.println("X86Frame(sp, unextendedSP, fp, pc): " + this);
      dumpStack();
    }

@@ -172,7 +173,6 @@ public class X86Frame extends Frame {
    frame.raw_sp = raw_sp;
    frame.raw_unextendedSP = raw_unextendedSP;
    frame.raw_fp = raw_fp;
    frame.raw_fp = raw_fp;
    frame.pc = pc;
    frame.deoptimized = deoptimized;
    return frame;

@@ -269,8 +269,8 @@ public class X86Frame extends Frame {

    if (isEntryFrame())       return senderForEntryFrame(map);
    if (isInterpretedFrame()) return senderForInterpreterFrame(map);
    if (isRicochetFrame())    return senderForRicochetFrame(map);

    if (!VM.getVM().isCore()) {
      if(cb == null) {
        cb = VM.getVM().getCodeCache().findBlob(getPC());
      } else {

@@ -282,14 +282,26 @@
      if (cb != null) {
        return senderForCompiledFrame(map, cb);
      }
    }

    // Must be native-compiled frame, i.e. the marshaling code for native
    // methods that exists in the core system.
    return new X86Frame(getSenderSP(), getLink(), getSenderPC());
  }

  private Frame senderForRicochetFrame(X86RegisterMap map) {
    if (DEBUG) {
      System.out.println("senderForRicochetFrame");
    }
    X86RicochetFrame f = X86RicochetFrame.fromFrame(this);
    if (map.getUpdateMap())
      updateMapWithSavedLink(map, f.senderLinkAddress());
    return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC());
  }

  private Frame senderForEntryFrame(X86RegisterMap map) {
    if (DEBUG) {
      System.out.println("senderForEntryFrame");
    }
    if (Assert.ASSERTS_ENABLED) {
      Assert.that(map != null, "map must be set");
    }

@@ -313,7 +325,37 @@ public class X86Frame extends Frame {
    return fr;
  }

  //------------------------------------------------------------------------------
  // frame::adjust_unextended_sp
  private void adjustUnextendedSP() {
    // If we are returning to a compiled MethodHandle call site, the
    // saved_fp will in fact be a saved value of the unextended SP.  The
    // simplest way to tell whether we are returning to such a call site
    // is as follows:

    CodeBlob cb = cb();
    NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
    if (senderNm != null) {
      // If the sender PC is a deoptimization point, get the original
      // PC.  For a MethodHandle call site the unextended_sp is stored in
      // saved_fp.
      if (senderNm.isDeoptMhEntry(getPC())) {
        // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
        raw_unextendedSP = getFP();
      }
      else if (senderNm.isDeoptEntry(getPC())) {
        // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
      }
      else if (senderNm.isMethodHandleReturn(getPC())) {
        raw_unextendedSP = getFP();
      }
    }
  }

  private Frame senderForInterpreterFrame(X86RegisterMap map) {
    if (DEBUG) {
      System.out.println("senderForInterpreterFrame");
    }
    Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
    Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
    // We do not need to update the callee-save register mapping because above

@@ -323,10 +365,21 @@ public class X86Frame extends Frame {
    // However C2 no longer uses callee-save registers for Java calls, so there
    // are no callee registers to find.

    return new X86Frame(sp, getLink(), unextendedSP.minus(sp));
    if (map.getUpdateMap())
      updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET));

    return new X86Frame(sp, unextendedSP, getLink(), getSenderPC());
  }

  private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) {
    map.setLocation(rbp, savedFPAddr);
  }

  private Frame senderForCompiledFrame(X86RegisterMap map, CodeBlob cb) {
    if (DEBUG) {
      System.out.println("senderForCompiledFrame");
    }

    //
    // NOTE: some of this code is (unfortunately) duplicated in X86CurrentFrameGuess
    //

@@ -336,41 +389,35 @@ public class X86Frame extends Frame {
    }

    // frame owned by optimizing compiler
    Address sender_sp = null;

    if (VM.getVM().isClientCompiler()) {
      sender_sp = addressOfStackSlot(SENDER_SP_OFFSET);
    } else {
      if (Assert.ASSERTS_ENABLED) {
        Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
      }
      sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
      Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size");
    }
    Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize());

    // On Intel the return_address is always the word on the stack
    Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
    Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize());

    if (map.getUpdateMap() && cb.getOopMaps() != null) {
    // This is the saved value of EBP which may or may not really be an FP.
    // It is only an FP if the sender is an interpreter frame (or C1?).
    Address savedFPAddr = senderSP.addOffsetTo(- SENDER_SP_OFFSET * VM.getVM().getAddressSize());

    if (map.getUpdateMap()) {
      // Tell GC to use argument oopmaps for some runtime stubs that need it.
      // For C1, the runtime stub might not have oop maps, so set this flag
      // outside of update_register_map.
      map.setIncludeArgumentOops(cb.callerMustGCArguments());

      if (cb.getOopMaps() != null) {
        OopMapSet.updateRegisterMap(this, cb, map, true);
      }

      if (VM.getVM().isClientCompiler()) {
        // Move this here for C1 and collecting oops in arguments (According to Rene)
        map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
        // Since the prolog does the save and restore of EBP there is no oopmap
        // for it so we must fill in its location as if there was an oopmap entry
        // since if our caller was compiled code there could be live jvm state in it.
        updateMapWithSavedLink(map, savedFPAddr);
      }

      Address saved_fp = null;
      if (VM.getVM().isClientCompiler()) {
        saved_fp = getFP().getAddressAt(0);
      } else if (VM.getVM().isServerCompiler() &&
                 (VM.getVM().getInterpreter().contains(sender_pc) ||
                  VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
        // C2 prologue saves EBP in the usual place.
        // however only use it if the sender had link information in it.
        saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
      }

    return new X86Frame(sender_sp, saved_fp, sender_pc);
    return new X86Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC);
  }
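
The sender-frame computation above leans on the x86 convention that the return PC is the word just below the sender's SP and the saved EBP/RBP sits SENDER_SP_OFFSET words below it. A minimal Java sketch of that arithmetic, with plain longs standing in for the SA's Address type and all concrete values invented for illustration:

// Sketch only; slot layout mirrors the code above, addresses are assumed.
public class SenderFrameArithmetic {
  static final long WORD_SIZE = 8;        // 64-bit VM assumed
  static final long SENDER_SP_OFFSET = 2; // matches the slot constant above

  public static void main(String[] args) {
    long unextendedSP = 0x7f0000001000L;  // callee's unextended SP (assumed)
    long frameSize    = 0x90;             // cb.getFrameSize() (assumed)

    // Sender's SP = callee's unextended SP + frame size.
    long senderSP = unextendedSP + frameSize;
    // Return address is one word below the sender's SP ...
    long senderPCSlot = senderSP - 1 * WORD_SIZE;
    // ... and the saved frame pointer two words below it.
    long savedFPSlot = senderSP - SENDER_SP_OFFSET * WORD_SIZE;

    System.out.printf("senderSP=0x%x pcSlot=0x%x fpSlot=0x%x%n",
                      senderSP, senderPCSlot, savedFPSlot);
  }
}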

  protected boolean hasSenderPD() {

@@ -403,14 +450,6 @@ public class X86Frame extends Frame {

  public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }

  public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
    if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
      throw new RuntimeException("Should not reach here");
    }

    return oopMapRegToLocation(reg, regMap);
  }

  public Address addressOfInterpreterFrameLocals() {
    return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
  }

@@ -0,0 +1,81 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.runtime.x86;

import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;

public class X86RicochetFrame extends VMObject {
  static {
    VM.registerVMInitializedObserver(new Observer() {
      public void update(Observable o, Object data) {
        initialize(VM.getVM().getTypeDataBase());
      }
    });
  }

  private static void initialize(TypeDataBase db) {
    Type type = db.lookupType("MethodHandles::RicochetFrame");

    senderLinkField    = type.getAddressField("_sender_link");
    savedArgsBaseField = type.getAddressField("_saved_args_base");
    exactSenderSPField = type.getAddressField("_exact_sender_sp");
    senderPCField      = type.getAddressField("_sender_pc");
  }

  private static AddressField senderLinkField;
  private static AddressField savedArgsBaseField;
  private static AddressField exactSenderSPField;
  private static AddressField senderPCField;

  static X86RicochetFrame fromFrame(X86Frame f) {
    return new X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset()));
  }

  private X86RicochetFrame(Address addr) {
    super(addr);
  }

  public Address senderLink() {
    return senderLinkField.getValue(addr);
  }
  public Address senderLinkAddress() {
    return addr.addOffsetTo(senderLinkField.getOffset());
  }
  public Address savedArgsBase() {
    return savedArgsBaseField.getValue(addr);
  }
  public Address extendedSenderSP() {
    return savedArgsBase();
  }
  public Address exactSenderSP() {
    return exactSenderSPField.getValue(addr);
  }
  public Address senderPC() {
    return senderPCField.getValue(addr);
  }
}
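
fromFrame() above works because the frame pointer of a ricochet frame points at the _sender_link field inside MethodHandles::RicochetFrame, so subtracting that field's offset recovers the struct base. A small sketch of the same pointer arithmetic with invented offsets and addresses:

// Sketch only; field offsets and the FP value are assumed for illustration.
public class RicochetBaseRecovery {
  public static void main(String[] args) {
    long senderLinkOffset = 0x18;   // senderLinkField.getOffset() (assumed)
    long fp = 0x7f0000002018L;      // frame pointer = address of _sender_link

    // Mirrors f.getFP().addOffsetTo(-senderLinkField.getOffset())
    long ricochetFrameBase = fp - senderLinkOffset;

    // Any field is then read at base + its own offset, e.g. _sender_pc.
    long senderPCOffset = 0x20;     // assumed
    long senderPCSlot = ricochetFrameBase + senderPCOffset;
    System.out.printf("base=0x%x senderPCSlot=0x%x%n",
                      ricochetFrameBase, senderPCSlot);
  }
}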
@@ -244,24 +244,6 @@ SUNWprivate_1.1 {
                JVM_Yield;
                JVM_handle_linux_signal;

                # Old reflection routines
                # These do not need to be present in the product build in JDK 1.4
                # but their code has not been removed yet because there will not
                # be a substantial code savings until JVM_InvokeMethod and
                # JVM_NewInstanceFromConstructor can also be removed; see
                # reflectionCompat.hpp.
                JVM_GetClassConstructor;
                JVM_GetClassConstructors;
                JVM_GetClassField;
                JVM_GetClassFields;
                JVM_GetClassMethod;
                JVM_GetClassMethods;
                JVM_GetField;
                JVM_GetPrimitiveField;
                JVM_NewInstance;
                JVM_SetField;
                JVM_SetPrimitiveField;

                # debug JVM
                JVM_AccessVMBooleanFlag;
                JVM_AccessVMIntFlag;

@@ -244,24 +244,6 @@ SUNWprivate_1.1 {
                JVM_Yield;
                JVM_handle_linux_signal;

                # Old reflection routines
                # These do not need to be present in the product build in JDK 1.4
                # but their code has not been removed yet because there will not
                # be a substantial code savings until JVM_InvokeMethod and
                # JVM_NewInstanceFromConstructor can also be removed; see
                # reflectionCompat.hpp.
                JVM_GetClassConstructor;
                JVM_GetClassConstructors;
                JVM_GetClassField;
                JVM_GetClassFields;
                JVM_GetClassMethod;
                JVM_GetClassMethods;
                JVM_GetField;
                JVM_GetPrimitiveField;
                JVM_NewInstance;
                JVM_SetField;
                JVM_SetPrimitiveField;

                # miscellaneous functions
                jio_fprintf;
                jio_printf;

@@ -41,8 +41,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)

# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug

# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@@ -107,8 +107,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)

# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug

# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@@ -44,8 +44,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)

# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug

# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@@ -1,48 +0,0 @@
#

#
# Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#

# Define public interface.

SUNWprivate_1.1 {
        global:
                # Old reflection routines
                # These do not need to be present in the product build in JDK 1.4
                # but their code has not been removed yet because there will not
                # be a substantial code savings until JVM_InvokeMethod and
                # JVM_NewInstanceFromConstructor can also be removed; see
                # reflectionCompat.hpp.
                JVM_GetClassConstructor;
                JVM_GetClassConstructors;
                JVM_GetClassField;
                JVM_GetClassFields;
                JVM_GetClassMethod;
                JVM_GetClassMethods;
                JVM_GetField;
                JVM_GetPrimitiveField;
                JVM_NewInstance;
                JVM_SetField;
                JVM_SetPrimitiveField;
};
@@ -48,9 +48,7 @@ endif # Platform_compiler == sparcWorks
CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)

# Linker mapfiles
# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers

# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@@ -58,13 +58,9 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# to inhibit the effect of the previous line on CFLAGS.

# Linker mapfiles
# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
ifdef USE_GCC
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers
else
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct

ifndef USE_GCC
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)

@@ -1794,7 +1794,8 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
  mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  set((intptr_t)real_msg, O1);
  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine

@@ -1831,7 +1832,8 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  set((intptr_t)real_msg, O1);
  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine

@@ -1976,7 +1978,8 @@ void MacroAssembler::stop(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  set((intptr_t)msg, O1);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

@@ -1998,7 +2001,8 @@ void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  set((intptr_t)msg, O0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call( CAST_FROM_FN_PTR(address, warning) );
  delayed()->nop();
  // ret();

@@ -4901,3 +4905,65 @@ void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
  // Caller should set it:
  // add(G0, 1, result); // equals
}

// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note: count is 64-bit in a 64-bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to the next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}
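
The size gate at the top of bis_zeroing is worth making concrete: BIS pays for a StoreLoad membar, so the block-init path is taken only once the byte count covers at least three cache lines (or BlockZeroingLowLimit, whichever is larger), and the comparison is unsigned. A minimal Java sketch with assumed values:

// Sketch only; cache line size and flag value are assumed.
public class BisZeroingThreshold {
  public static void main(String[] args) {
    int cacheLineSize = 64;            // VM_Version::prefetch_data_size() (assumed)
    int blockZeroingLowLimit = 2048;   // -XX:BlockZeroingLowLimit (assumed)

    // block_zero_size = MAX2(cache_line_size*3, BlockZeroingLowLimit)
    int blockZeroSize = Math.max(cacheLineSize * 3, blockZeroingLowLimit);

    for (long count : new long[] {128, 2048, 8192}) {
      // lessUnsigned branch above -> unsigned comparison here
      boolean useBis = Long.compareUnsigned(count, blockZeroSize) >= 0;
      System.out.println(count + " bytes -> " + (useBis ? "BIS loop" : "stx loop"));
    }
  }
}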
@@ -886,6 +886,7 @@ class Assembler : public AbstractAssembler {

  enum ASIs { // page 72, v9
    ASI_PRIMARY            = 0x80,
    ASI_PRIMARY_NOFAULT    = 0x82,
    ASI_PRIMARY_LITTLE     = 0x88,
    // Block initializing store
    ASI_ST_BLKINIT_PRIMARY = 0xE2,

@@ -1786,9 +1787,12 @@ public:
                                         rs1(s) |
                                         op3(wrreg_op3) |
                                         u_field(2, 29, 25) |
                                         u_field(1, 13, 13) |
                                         immed(true) |
                                         simm(simm13a, 13)); }
  inline void wrasi(  Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
  inline void wrasi(Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
  // wrasi(d, imm) stores (d xor imm) to asi
  inline void wrasi(Register d, int simm13a) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) |
                                         u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
  inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }


@@ -2625,6 +2629,8 @@ public:
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);
  // Use BIS for zeroing
  void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);

#undef VIRTUAL

@@ -142,11 +142,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
}


LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return FrameMap::I0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}

@@ -782,13 +782,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

    case jvmti_exception_throw_id:
      { // Oexception : exception
        __ set_info("jvmti_exception_throw", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);

@@ -156,9 +156,16 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#endif // _LP64
}

typedef void (*_zero_Fn)(HeapWord* to, size_t count);

static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
  assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");

  if (value == 0 && UseBlockZeroing &&
      (count > (BlockZeroingLowLimit >> LogHeapWordSize))) {
    // Call it only when block zeroing is used
    ((_zero_Fn)StubRoutines::zero_aligned_words())(tohw, count);
  } else {
    julong* to = (julong*)tohw;
    julong  v  = ((julong)value << 32) | value;
    // If count is odd, odd will be equal to 1 on 32-bit platform

@@ -176,6 +183,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
      *((juint*)to) = value;

    }
  }
}

static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {

@@ -259,13 +259,8 @@
  };
#endif /* CC_INTERP */

  // the compiler frame has many of the same fields as the interpreter frame
  // %%%%% factor out declarations of the shared fields
  enum compiler_frame_fixed_locals {
    compiler_frame_d_scratch_fp_offset = -2,
    compiler_frame_vm_locals_fp_offset = -2, // should be same as above

    compiler_frame_vm_local_words = -compiler_frame_vm_locals_fp_offset
    compiler_frame_vm_locals_fp_offset = -2
  };

 private:

@@ -283,9 +278,6 @@

  inline void interpreter_frame_set_tos_address(intptr_t* x);


  // %%%%% Another idea: instead of defining 3 fns per item, just define one returning a ref

  // monitors:

  // next two fns read and write Lmonitors value,

@@ -298,22 +290,8 @@
    return ((interpreterState)sp_at(interpreter_state_ptr_offset));
  }


#endif /* CC_INTERP */



  // Compiled frames

 public:
  // Tells if this register can hold 64 bits on V9 (really, V8+).
  static bool holds_a_doubleword(Register reg) {
#ifdef _LP64
    //    return true;
    return reg->is_out() || reg->is_global();
#else
    return reg->is_out() || reg->is_global();
#endif
  }

#endif // CPU_SPARC_VM_FRAME_SPARC_HPP

@@ -1262,6 +1262,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
    }
    break;

  case _adapter_opt_profiling:
    if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
      Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
      __ ld(G3_mh_vmcount, O1_scratch);
      __ add(O1_scratch, 1, O1_scratch);
      __ st(O1_scratch, G3_mh_vmcount);
    }
    // fall through

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:

@@ -460,6 +460,8 @@ source_hpp %{
// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.

@@ -521,6 +523,12 @@ bool can_branch_register( Node *bol, Node *cmp ) {
  return false;
}

bool use_block_zeroing(Node* count) {
  // Use BIS for zeroing if count is not constant
  // or it is >= BlockZeroingLowLimit.
  return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
}
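
The one-liner above is subtle: C2's find_intptr_t_con(dflt) returns the node's value when count is a compile-time constant and dflt otherwise, so passing BlockZeroingLowLimit as the default makes every non-constant count satisfy the >= test. A small Java model of that decision, with the flag value assumed:

// Model only; null stands for "count is not a compile-time constant".
public class UseBlockZeroingModel {
  static final long BLOCK_ZEROING_LOW_LIMIT = 2048; // assumed flag value
  static final boolean USE_BLOCK_ZEROING = true;

  static boolean useBlockZeroing(Long constantCount) {
    // find_intptr_t_con(BlockZeroingLowLimit): value if constant, else default
    long v = (constantCount != null) ? constantCount : BLOCK_ZEROING_LOW_LIMIT;
    return USE_BLOCK_ZEROING && v >= BLOCK_ZEROING_LOW_LIMIT;
  }

  public static void main(String[] args) {
    System.out.println(useBlockZeroing(64L));   // small constant -> false
    System.out.println(useBlockZeroing(4096L)); // large constant -> true
    System.out.println(useBlockZeroing(null));  // unknown count  -> true
  }
}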

// ****************************************************************************

// REQUIRED FUNCTIONALITY

@@ -832,6 +840,7 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
         !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
         !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
         !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
         !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
         !(n->ideal_Opcode()==Op_Load2I    && ld_op==Op_LoadD) &&
         !(n->ideal_Opcode()==Op_Load4C    && ld_op==Op_LoadD) &&
         !(n->ideal_Opcode()==Op_Load4S    && ld_op==Op_LoadD) &&

@@ -2810,25 +2819,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
  %}

  // Compiler ensures base is doubleword aligned and cnt is count of doublewords
  enc_class enc_Clear_Array(iRegX cnt, iRegP base, iRegX temp) %{
    MacroAssembler _masm(&cbuf);
    Register nof_bytes_arg    = reg_to_register_object($cnt$$reg);
    Register nof_bytes_tmp    = reg_to_register_object($temp$$reg);
    Register base_pointer_arg = reg_to_register_object($base$$reg);

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}

  enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
    Label Ldone, Lloop;

@@ -10257,9 +10247,9 @@ instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o
  ins_pipe(long_memory_op);
%}

// Count and Base registers are fixed because the allocator cannot
// kill unknown registers.  The encodings are generic.
// The encodings are generic.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);

@@ -10267,7 +10257,71 @@ instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg
          "loop:   SUBcc  $temp,8,$temp\t! Count down a dword of bytes\n"
          "        BRge   loop\t\t! Clearing loop\n"
          "        STX    G0,[$base+$temp]\t! delay slot" %}
  ins_encode( enc_Clear_Array(cnt, base, temp) );

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg    = $cnt$$Register;
    Register nof_bytes_tmp    = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR  [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;
    Register temp  = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

@@ -1124,6 +1124,126 @@ class StubGenerator: public StubCodeGenerator {
    }
  }

  //
  // Generate main code for disjoint arraycopy
  //
  typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
                                              Label& L_loop, bool use_prefetch, bool use_bis);

  void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
                          int iter_size, CopyLoopFunc copy_loop_func) {
    Label L_copy;

    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
    assert(prefetch_dist < 4096, "invalid value");
    prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
    int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count

    if (UseBlockCopy) {
      Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;

      // 64 bytes tail + bytes copied in one loop iteration
      int tail_size = 64 + iter_size;
      int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
      // Use BIS copy only for big arrays since it requires membar.
      __ set(block_copy_count, O4);
      __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
      // This code is for disjoint source and destination:
      //   to <= from || to >= from+count
      // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
      __ sub(from, to, O4);
      __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
      __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);

      __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
      // BIS should not be used to copy tail (64 bytes+iter_size)
      // to avoid zeroing of following values.
      __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0

      if (prefetch_count > 0) { // rounded up to one iteration count
        // Do prefetching only if copy size is bigger
        // than prefetch distance.
        __ set(prefetch_count, O4);
        __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
        __ sub(count, prefetch_count, count);

        (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
        __ add(count, prefetch_count, count); // restore count

      } // prefetch_count > 0

      (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
      __ add(count, (tail_size>>log2_elem_size), count); // restore count

      __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
      // BIS needs membar.
      __ membar(Assembler::StoreLoad);
      // Copy tail
      __ ba_short(L_copy);

      __ BIND(L_skip_block_copy);
    } // UseBlockCopy

    if (prefetch_count > 0) { // rounded up to one iteration count
      // Do prefetching only if copy size is bigger
      // than prefetch distance.
      __ set(prefetch_count, O4);
      __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
      __ sub(count, prefetch_count, count);

      Label L_copy_prefetch;
      (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
      __ add(count, prefetch_count, count); // restore count

    } // prefetch_count > 0

    (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
  }
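
The disjointness guard above deserves a worked example: block-init stores can clobber 'from' when 'to' trails it by less than the tail size, and the srax by 4 scales the distance so the comparison fits the short branch's 5-bit immediate. A minimal Java sketch with invented addresses; note the comparison is unsigned, so a negative distance (to past from) correctly allows BIS:

// Sketch only; iter_size and the sample addresses are assumed.
public class BisOverlapGuard {
  public static void main(String[] args) {
    int iterSize = 16;              // bytes copied per loop iteration (assumed)
    int tailSize = 64 + iterSize;   // 64-byte tail + one iteration

    long from = 0x10000L;
    for (long to : new long[] {0xFFA0L, 0xFFF0L, 0x20000L}) {
      long scaledDistance = (from - to) >> 4;  // signed shift, like srax
      // lessEqualUnsigned branch -> unsigned comparison here
      boolean skipBis =
          Long.compareUnsigned(scaledDistance, tailSize >> 4) <= 0;
      System.out.printf("to=0x%x -> %s%n", to,
                        skipBis ? "skip BIS (too close behind 'from')"
                                : "BIS copy ok");
    }
  }
}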

  //
  // Helper methods for copy_16_bytes_forward_with_shift()
  //
  void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
                                Label& L_loop, bool use_prefetch, bool use_bis) {

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 0, O4);
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to, -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to, -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, left_shift,  O3);
  }
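
The sllx/srlx/bset sequence above is a classic shift-and-merge: two aligned 8-byte loads are combined so that each 8-byte store is aligned even though the source was not. The same idea in a few lines of Java, with the misalignment assumed to be 3 bytes:

// Sketch only; the misalignment and sample words are invented.
public class ShiftMergeCopy {
  public static void main(String[] args) {
    int misalignBytes = 3;           // source misalignment (assumed)
    int left  = misalignBytes * 8;   // left shift in bits
    int right = 64 - left;           // right shift in bits

    long prev = 0x0011223344556677L; // previously loaded aligned word
    long next = 0x8899AABBCCDDEEFFL; // next aligned word

    // Tail of 'prev' joined with head of 'next' forms one aligned word,
    // mirroring (O3 << left) | (O4 >>> right) in the loop above.
    long merged = (prev << left) | (next >>> right);
    System.out.printf("merged=0x%016x%n", merged);  // 0x33445566778899aa
  }
}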

  // Copy big chunks forward with shift
  //

@@ -1135,8 +1255,10 @@ class StubGenerator: public StubCodeGenerator {
  //   L_copy_bytes - copy exit label
  //
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                     Register count, int count_dec, Label& L_copy_bytes) {
    Label L_loop, L_aligned_copy, L_copy_last_bytes;
                     Register count, int log2_elem_size, Label& L_copy_bytes) {
    Label L_aligned_copy, L_copy_last_bytes;
    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(from, 7, G1); // misaligned bytes

@@ -1154,27 +1276,13 @@ class StubGenerator: public StubCodeGenerator {
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
    __ deccc(count, count_dec);   // Pre-decrement 'count'
    __ dec(count, count_dec);     // Pre-decrement 'count'
    __ andn(from, 7, from);       // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(from, 0, O4);
    __ deccc(count, count_dec);   // Can we do next iteration after this one?
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ stx(O3, to, -16);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    __ stx(O4, to, -8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    disjoint_copy_core(from, to, count, log2_elem_size, 16, copy_16_bytes_shift_loop);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);

@@ -1184,7 +1292,6 @@ class StubGenerator: public StubCodeGenerator {
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

@@ -1348,7 +1455,7 @@ class StubGenerator: public StubCodeGenerator {
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
      copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
    }

    // Both arrays are 8-byte aligned, copy 16 bytes at a time

@@ -1576,7 +1683,7 @@ class StubGenerator: public StubCodeGenerator {
      // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
      copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
    }

    // Both arrays are 8-byte aligned, copy 16 bytes at a time

@@ -1949,6 +2056,45 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  //
  // Helper methods for generate_disjoint_int_copy_core()
  //
  void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
                          Label& L_loop, bool use_prefetch, bool use_bis) {

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 4, O4);
    __ ldx(from, 12, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, 4); // Can we do next iteration after this one?

    __ srlx(O4, 32, G3);
    __ bset(G3, O3);
    __ sllx(O4, 32, O4);
    __ srlx(G4, 32, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to, -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to, -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, 32,  O3);

  }

  //
  // Generate core code for disjoint int copy (and oop copy on 32-bit).
  // If "aligned" is true, the "from" and "to" addresses are assumed

@@ -1962,7 +2108,7 @@ class StubGenerator: public StubCodeGenerator {
  void generate_disjoint_int_copy_core(bool aligned) {

    Label L_skip_alignment, L_aligned_copy;
    Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
    Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;

    const Register from      = O0;   // source array address
    const Register to        = O1;   // destination array address

@@ -2013,30 +2159,16 @@ class StubGenerator: public StubCodeGenerator {

      // copy with shift 4 elements (16 bytes) at a time
      __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4

      __ align(OptoLoopAlignment);
      __ BIND(L_copy_16_bytes);
      __ ldx(from, 4, O4);
      __ deccc(count, 4); // Can we do next iteration after this one?
      __ ldx(from, 12, G4);
      __ inc(to, 16);
      __ inc(from, 16);
      __ sllx(O3, 32, O3);
      __ srlx(O4, 32, G3);
      __ bset(G3, O3);
      __ stx(O3, to, -16);
      __ sllx(O4, 32, O4);
      __ srlx(G4, 32, G3);
      __ bset(G3, O4);
      __ stx(O4, to, -8);
      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
      __ delayed()->mov(G4, O3);

      disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);

      __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
      __ delayed()->inc(count, 4); // restore 'count'

      __ BIND(L_aligned_copy);
    }
  } // !aligned

  // copy 4 elements (16 bytes) at a time
  __ and3(count, 1, G4); // Save
  __ srl(count, 1, count);

@@ -2222,6 +2354,38 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  //
  // Helper methods for generate_disjoint_long_copy_core()
  //
  void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
                          Label& L_loop, bool use_prefetch, bool use_bis) {
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    for (int off = 0; off < 64; off += 16) {
      if (use_prefetch && (off & 31) == 0) {
        if (ArraycopySrcPrefetchDistance > 0) {
          __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
        }
        if (ArraycopyDstPrefetchDistance > 0) {
          __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
        }
      }
      __ ldx(from, off+0, O4);
      __ ldx(from, off+8, O5);
      if (use_bis) {
        __ stxa(O4, to, off+0);
        __ stxa(O5, to, off+8);
      } else {
        __ stx(O4, to, off+0);
        __ stx(O5, to, off+8);
      }
    }
    __ deccc(count, 8);
    __ inc(from, 64);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->inc(to, 64);
  }
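
The loop above is unrolled into four 16-byte load/store pairs per 64-byte iteration, with a prefetch hint issued only at 32-byte offsets ((off & 31) == 0). A rough Java rendering of that structure, using arrays as a stand-in for raw memory:

// Sketch only; array indices model byte offsets / 8.
public class Unrolled64ByteCopy {
  static void copy64(long[] src, long[] dst, int fromIdx, int toIdx) {
    for (int off = 0; off < 8; off += 2) {     // 8 longs = 64 bytes
      if (((off * 8) & 31) == 0) {
        // a prefetch(from + distance) hint would be issued here
      }
      dst[toIdx + off]     = src[fromIdx + off];
      dst[toIdx + off + 1] = src[fromIdx + off + 1];
    }
  }

  public static void main(String[] args) {
    long[] src = new long[8];
    for (int i = 0; i < src.length; i++) src[i] = i;
    long[] dst = new long[8];
    copy64(src, dst, 0, 0);
    System.out.println(java.util.Arrays.toString(dst));
  }
}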

  //
  // Generate core code for disjoint long copy (and oop copy on 64-bit).
  // "aligned" is ignored, because we must make the stronger

@@ -2267,7 +2431,7 @@ class StubGenerator: public StubCodeGenerator {
    __ delayed()->add(offset0, 8, offset8);

    // Copy by 64 bytes chunks
    Label L_copy_64_bytes;

    const Register from64 = O3;  // source address
    const Register to64   = G3;  // destination address
    __ subcc(count, 6, O3);

@@ -2275,24 +2439,14 @@ class StubGenerator: public StubCodeGenerator {
    __ delayed()->mov(to, to64);
    // Now we can use O4(offset0), O5(offset8) as temps
    __ mov(O3, count);
    // count >= 0 (original count - 8)
    __ mov(from, from64);

    __ align(OptoLoopAlignment);
    __ BIND(L_copy_64_bytes);
    for( int off = 0; off < 64; off += 16 ) {
      __ ldx(from64, off+0, O4);
      __ ldx(from64, off+8, O5);
      __ stx(O4, to64, off+0);
      __ stx(O5, to64, off+8);
    }
    __ deccc(count, 8);
    __ inc(from64, 64);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
    __ delayed()->inc(to64, 64);
    disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop);

    // Restore O4(offset0), O5(offset8)
    __ sub(from64, from, offset0);
    __ inccc(count, 6);
    __ inccc(count, 6); // restore count
    __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
    __ delayed()->add(offset0, 8, offset8);

@@ -3069,6 +3223,34 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  //
  // Generate stub for heap zeroing.
  // "to" address is aligned to jlong (8 bytes).
  //
  // Arguments for generated stub:
  //      to:    O0
  //      count: O1 treated as signed (count of HeapWord)
  //             count could be 0
  //
  address generate_zero_aligned_words(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    const Register to    = O0;   // source array address
    const Register count = O1;   // HeapWords count
    const Register temp  = O2;   // scratch

    Label Ldone;
    __ sllx(count, LogHeapWordSize, count); // to bytes count
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);
    __ retl();
    __ delayed()->nop();
    return start;
  }
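
Callers such as pd_fill_to_aligned_words (earlier in this change) reach this stub through a function-pointer cast of StubRoutines::zero_aligned_words() and pass a HeapWord count; the stub converts it to bytes, and bis_zeroing's initial "count <= 0" branch makes a zero count a no-op. A tiny Java sketch of that count handling, assuming 8-byte HeapWords:

// Sketch only; LogHeapWordSize = 3 assumes a 64-bit VM.
public class ZeroAlignedWordsCount {
  static final int LOG_HEAP_WORD_SIZE = 3;

  public static void main(String[] args) {
    for (long words : new long[] {0, 5, 512}) {
      long bytes = words << LOG_HEAP_WORD_SIZE;  // the sllx in the stub
      System.out.println(words + " words -> " + bytes + " bytes"
          + (bytes <= 0 ? " (no-op)" : ""));
    }
  }
}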

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;

@@ -3195,6 +3377,10 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    if (UseBlockZeroing) {
      StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
    }
  }

  void generate_initial() {

@@ -3374,7 +3374,7 @@ void TemplateTable::_new() {

  if(UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

@@ -3455,7 +3455,11 @@ void TemplateTable::_new() {
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    { Label loop;
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot

@@ -75,6 +75,24 @@ void VM_Version::initialize() {
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
  }

  if (has_v9()) {
    assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
    if (ArraycopySrcPrefetchDistance >= 4096)
      ArraycopySrcPrefetchDistance = 4064;
    assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
    if (ArraycopyDstPrefetchDistance >= 4096)
      ArraycopyDstPrefetchDistance = 4064;
  } else {
    if (ArraycopySrcPrefetchDistance > 0) {
      warning("prefetch instructions are not available on this CPU");
      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
    }
    if (ArraycopyDstPrefetchDistance > 0) {
      warning("prefetch instructions are not available on this CPU");
      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
    }
  }

  UseSSE = 0; // Only on x86 and x64

  _supports_cx8 = has_v9();

@@ -170,6 +188,26 @@ void VM_Version::initialize() {
    FLAG_SET_DEFAULT(UseCBCond, false);
  }

  assert(BlockZeroingLowLimit > 0, "invalid value");
  if (has_block_zeroing()) {
    if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
      FLAG_SET_DEFAULT(UseBlockZeroing, true);
    }
  } else if (UseBlockZeroing) {
    warning("BIS zeroing instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseBlockZeroing, false);
  }

  assert(BlockCopyLowLimit > 0, "invalid value");
  if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache
    if (FLAG_IS_DEFAULT(UseBlockCopy)) {
      FLAG_SET_DEFAULT(UseBlockCopy, true);
    }
  } else if (UseBlockCopy) {
    warning("BIS instructions are not available or expensive on this CPU");
    FLAG_SET_DEFAULT(UseBlockCopy, false);
  }
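
Both blocks follow the same ergonomics pattern: a flag is defaulted on only when the CPU supports the feature, while an explicit request on unsupported hardware is warned about and reset. A small Java model of the pattern, with plain booleans standing in for the VM's flag machinery:

// Model only; flag state and CPU capability are assumed.
public class BlockCopyErgonomics {
  static boolean useBlockCopy = false;
  static boolean flagIsDefault = true;            // user did not set the flag

  static boolean hasBlockZeroing() { return false; } // pre-T4 CPU (assumed)

  public static void main(String[] args) {
    if (hasBlockZeroing()) {
      if (flagIsDefault) {
        useBlockCopy = true;   // FLAG_SET_DEFAULT(UseBlockCopy, true)
      }
    } else if (useBlockCopy) {
      System.err.println("warning: BIS instructions are not available or expensive on this CPU");
      useBlockCopy = false;    // FLAG_SET_DEFAULT(UseBlockCopy, false)
    }
    System.out.println("UseBlockCopy = " + useBlockCopy);
  }
}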
|
||||
|
||||
#ifdef COMPILER2
|
||||
// T4 and newer Sparc cpus have fast RDPC.
|
||||
if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
|
||||
|
|
|
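The UseBlockZeroing/UseBlockCopy logic above follows a common pattern: turn a feature on by default only when the CPU supports it and the user has not set the flag explicitly, and force it off (with a warning) when it is requested but unsupported. A minimal standalone sketch of that pattern; the `Flag` struct and `configure` function here are illustrative stand-ins, not the HotSpot flag machinery:

#include <cstdio>

// Hypothetical stand-in for the VM's flag bookkeeping (not HotSpot API).
struct Flag {
  bool value;
  bool set_on_command_line;
};

// Mirrors the shape of the UseBlockZeroing logic above: default on when the
// hardware supports it, warn and force off when requested but unsupported.
void configure(Flag& use_block_zeroing, bool has_block_zeroing) {
  if (has_block_zeroing) {
    if (!use_block_zeroing.set_on_command_line) {  // FLAG_IS_DEFAULT analogue
      use_block_zeroing.value = true;              // FLAG_SET_DEFAULT analogue
    }
  } else if (use_block_zeroing.value) {
    std::puts("warning: BIS zeroing instructions are not available on this CPU");
    use_block_zeroing.value = false;
  }
}

int main() {
  Flag f = { true, true };   // user asked for the feature...
  configure(f, false);       // ...on hardware without BIS: warns, turns it off
  std::printf("UseBlockZeroing = %d\n", f.value);
  return 0;
}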
@ -135,8 +135,8 @@ public:
  // T4 and newer Sparc have fast RDPC instruction.
  static bool has_fast_rdpc() { return is_T4(); }

  // T4 and newer Sparc have Most-Recently-Used (MRU) BIS.
  static bool has_mru_blk_init() { return has_blk_init() && is_T4(); }
  // On T4 and newer Sparc BIS to the beginning of cache line always zeros it.
  static bool has_block_zeroing() { return has_blk_init() && is_T4(); }

  static const char* cpu_features() { return _features_str; }

@ -129,10 +129,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return receiverOpr();
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

@ -371,55 +367,6 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info)
}


void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
  if (exception->is_valid()) {
    // preserve exception
    // note: the monitor_exit runtime call is a leaf routine
    //       and cannot block => no GC can happen
    // The slow case (MonitorAccessStub) uses the first two stack slots
    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
    __ movptr (Address(rsp, 2*wordSize), exception);
  }

  Register obj_reg  = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  // setup registers (lock_reg must be rax, for lock_object)
  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
  Register hdr = lock_reg;
  assert(new_hdr == SYNC_header, "wrong register");
  lock_reg = new_hdr;
  // compute pointer to BasicLock
  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ lea(lock_reg, lock_addr);
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after exceptionhandler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ jmp(*slow_case->entry());
  }
  // done
  __ bind(*slow_case->continuation());

  if (exception->is_valid()) {
    // restore exception
    __ movptr (exception, Address(rsp, 2 * wordSize));
  }
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
  // if rounding, must let FrameMap know!

@ -29,8 +29,6 @@
  Address::ScaleFactor array_element_size(BasicType type) const;

  void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception);

  void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);

  // helper functions which check for overflow and set bailout if it

@ -1465,19 +1465,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

    case jvmti_exception_throw_id:
      { // rax,: exception oop
        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
        // Preserve all registers across this potentially blocking call
        const int num_rt_args = 2;  // thread, exception oop
        OopMap* map = save_live_registers(sasm, num_rt_args);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);

@ -1343,6 +1343,13 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
      }
      break;

    case _adapter_opt_profiling:
      if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
        Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
        __ incrementl(rcx_mh_vmcount);
      }
      // fall through

    case _adapter_retype_only:
    case _adapter_retype_raw:
      // immediately jump to the next MH layer:
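The `_adapter_opt_profiling` case above just bumps the `vmcount` field of a CountingMethodHandle before falling through to the retype path. A rough standalone C++ analogue of such a counting wrapper, which increments an invocation counter and then delegates to the wrapped target (the names here are illustrative, not the JDK's):

#include <cstdio>
#include <functional>

// Illustrative counting wrapper: counts invocations, then delegates,
// loosely analogous to what the _adapter_opt_profiling stub does in assembly.
struct CountingHandle {
  int vmcount = 0;                       // incremented on every call
  std::function<int(int)> target;        // the wrapped "method handle"

  int invoke(int arg) {
    ++vmcount;                           // __ incrementl(rcx_mh_vmcount) analogue
    return target(arg);                  // fall through to the real target
  }
};

int main() {
  CountingHandle h;
  h.target = [](int x) { return x * 2; };
  for (int i = 0; i < 5; i++) h.invoke(i);
  std::printf("vmcount = %d\n", h.vmcount);  // prints 5
  return 0;
}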
@ -110,6 +110,7 @@ public:

class RicochetFrame {
  friend class MethodHandles;
  friend class VMStructs;

 private:
  intptr_t* _continuation;          // what to do when control gets back here

@ -346,7 +346,6 @@ void Compilation::install_code(int frame_size) {
    implicit_exception_table(),
    compiler(),
    _env->comp_level(),
    true,
    has_unsafe_access()
  );
}

@ -28,8 +28,10 @@
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMethodHandle.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"

@ -1424,7 +1426,7 @@ void GraphBuilder::method_return(Value x) {
  // See whether this is the first return; if so, store off some
  // of the state for later examination
  if (num_returns() == 0) {
    set_inline_cleanup_info(_block, _last, state());
    set_inline_cleanup_info();
  }

  // The current bci() is in the wrong scope, so use the bci() of

@ -1582,6 +1584,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
    code = Bytecodes::_invokespecial;
  }

  bool is_invokedynamic = code == Bytecodes::_invokedynamic;

  // NEEDS_CLEANUP
  // I've added the target->is_loaded() test below but I don't really understand
  // how klass->is_loaded() can be true and yet target->is_loaded() is false.

@ -1693,26 +1697,31 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      && target->will_link(klass, callee_holder, code)) {
    // callee is known => check if we have static binding
    assert(target->is_loaded(), "callee must be known");
    if (code == Bytecodes::_invokestatic
        || code == Bytecodes::_invokespecial
        || code == Bytecodes::_invokevirtual && target->is_final_method()
        ) {
    if (code == Bytecodes::_invokestatic ||
        code == Bytecodes::_invokespecial ||
        code == Bytecodes::_invokevirtual && target->is_final_method() ||
        code == Bytecodes::_invokedynamic) {
      ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
      bool success = false;
      if (target->is_method_handle_invoke()) {
        // method handle invokes
        success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
      }
      if (!success) {
        // static binding => check if callee is ok
        ciMethod* inline_target = (cha_monomorphic_target != NULL)
                                    ? cha_monomorphic_target
                                    : target;
        bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
      }
      CHECK_BAILOUT();

#ifndef PRODUCT
      // printing
      if (PrintInlining && !res) {
      if (PrintInlining && !success) {
        // if it was successfully inlined, then it was already printed.
        print_inline_result(inline_target, res);
        print_inline_result(inline_target, success);
      }
#endif
      clear_inline_bailout();
      if (res) {
      if (success) {
        // Register dependence if JVMTI has either breakpoint
        // setting or hotswapping of methods capabilities since they may
        // cause deoptimization.

@ -1740,7 +1749,6 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
                             code == Bytecodes::_invokespecial   ||
                             code == Bytecodes::_invokevirtual   ||
                             code == Bytecodes::_invokeinterface;
  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
  ValueType* result_type = as_ValueType(target->return_type());

  // We require the debug info to be the "state before" because

@ -3038,7 +3046,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
    INLINE_BAILOUT("disallowed by CompilerOracle")
  } else if (!callee->can_be_compiled()) {
    // callee is not compilable (prob. has breakpoints)
    INLINE_BAILOUT("not compilable")
    INLINE_BAILOUT("not compilable (disabled)")
  } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
    // intrinsics can be native or not
    return true;

@ -3397,7 +3405,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}


bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
  assert(!callee->is_native(), "callee must not be native");
  if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
    INLINE_BAILOUT("inlining prohibited by policy");

@ -3430,7 +3438,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  } else {
    if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
    if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
    if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
    if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");

    // don't inline throwable methods unless the inlining tree is rooted in a throwable class
    if (callee->name() == ciSymbol::object_initializer_name() &&

@ -3468,7 +3476,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {

  // Insert null check if necessary
  Value recv = NULL;
  if (code() != Bytecodes::_invokestatic) {
  if (code() != Bytecodes::_invokestatic &&
      code() != Bytecodes::_invokedynamic) {
    // note: null check must happen even if first instruction of callee does
    //       an implicit null check since the callee is in a different scope
    //       and we must make sure exception handling does the right thing

@ -3496,7 +3505,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  // fall-through of control flow, all return instructions of the
  // callee will need to be replaced by Goto's pointing to this
  // continuation point.
  BlockBegin* cont = block_at(next_bci());
  BlockBegin* cont = cont_block != NULL ? cont_block : block_at(next_bci());
  bool continuation_existed = true;
  if (cont == NULL) {
    cont = new BlockBegin(next_bci());

@ -3608,7 +3617,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  // block merging. This allows load elimination and CSE to take place
  // across multiple callee scopes if they are relatively simple, and
  // is currently essential to making inlining profitable.
  if ( num_returns() == 1
  if (cont_block == NULL) {
    if (num_returns() == 1
        && block() == orig_block
        && block() == inline_cleanup_block()) {
      _last = inline_cleanup_return_prev();

@ -3617,7 +3627,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
      // Inlining caused that the instructions after the invoke in the
      // caller are not reachable any more. So skip filling this block
      // with instructions!
      assert (cont == continuation(), "");
      assert(cont == continuation(), "");
      assert(_last && _last->as_BlockEnd(), "");
      _skip_block = true;
    } else {

@ -3631,6 +3641,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
        _skip_block = true;
      }
    }
  }

  // Fill the exception handler for synchronized methods with instructions
  if (callee->is_synchronized() && sync_handler->state() != NULL) {

@ -3645,6 +3656,114 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
}


bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
  assert(!callee->is_static(), "change next line");
  int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1);
  Value receiver = state()->stack_at(index);

  if (receiver->type()->is_constant()) {
    ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();

    // Set the callee to have access to the class and signature in
    // the MethodHandleCompiler.
    method_handle->set_callee(callee);
    method_handle->set_caller(method());

    // Get an adapter for the MethodHandle.
    ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter();
    if (method_handle_adapter != NULL) {
      return try_inline(method_handle_adapter, /*holder_known=*/ true);
    }
  } else if (receiver->as_CheckCast()) {
    // Match MethodHandle.selectAlternative idiom
    Phi* phi = receiver->as_CheckCast()->obj()->as_Phi();

    if (phi != NULL && phi->operand_count() == 2) {
      // Get the two MethodHandle inputs from the Phi.
      Value op1 = phi->operand_at(0);
      Value op2 = phi->operand_at(1);
      ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
      ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();

      // Set the callee to have access to the class and signature in
      // the MethodHandleCompiler.
      mh1->set_callee(callee);
      mh1->set_caller(method());
      mh2->set_callee(callee);
      mh2->set_caller(method());

      // Get adapters for the MethodHandles.
      ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
      ciMethod* mh2_adapter = mh2->get_method_handle_adapter();

      if (mh1_adapter != NULL && mh2_adapter != NULL) {
        set_inline_cleanup_info();

        // Build the If guard
        BlockBegin* one = new BlockBegin(next_bci());
        BlockBegin* two = new BlockBegin(next_bci());
        BlockBegin* end = new BlockBegin(next_bci());
        Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
        block()->set_end(iff->as_BlockEnd());

        // Connect up the states
        one->merge(block()->end()->state());
        two->merge(block()->end()->state());

        // Save the state for the second inlinee
        ValueStack* state_before = copy_state_before();

        // Parse first adapter
        _last = _block = one;
        if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
          restore_inline_cleanup_info();
          block()->clear_end();  // remove appended iff
          return false;
        }

        // Parse second adapter
        _last = _block = two;
        _state = state_before;
        if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
          restore_inline_cleanup_info();
          block()->clear_end();  // remove appended iff
          return false;
        }

        connect_to_end(end);
        return true;
      }
    }
  }
  return false;
}


bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
  // Get the MethodHandle from the CallSite.
  ciCallSite*     call_site     = stream()->get_call_site();
  ciMethodHandle* method_handle = call_site->get_target();

  // Set the callee to have access to the class and signature in the
  // MethodHandleCompiler.
  method_handle->set_callee(callee);
  method_handle->set_caller(method());

  // Get an adapter for the MethodHandle.
  ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter();
  if (method_handle_adapter != NULL) {
    if (try_inline(method_handle_adapter, /*holder_known=*/ true)) {
      // Add a dependence for invalidation of the optimization.
      if (!call_site->is_constant_call_site()) {
        dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
      }
      return true;
    }
  }
  return false;
}


void GraphBuilder::inline_bailout(const char* msg) {
  assert(msg != NULL, "inline bailout msg must exist");
  _inline_bailout_msg = msg;
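The selectAlternative idiom matched above corresponds to a guard of the form `selectAlternative(test, mh1, mh2).invoke(...)`: the builder replaces it with an explicit If over the phi and inlines both method handle targets into a shared continuation block. A simplified standalone C++ sketch of the control-flow shape being built; the types and functions are illustrative only:

#include <cstdio>

// Illustrative stand-ins for the two inlined adapters.
static int alternative_one(int x) { return x + 1; }
static int alternative_two(int x) { return x - 1; }

// Rough scalar analogue of the graph the builder constructs: an If guard
// choosing between two inlined targets, both falling through to one
// continuation ("end") block.
int select_alternative_inlined(bool test, int arg) {
  int result;
  if (test) {                          // the appended If over the phi
    result = alternative_one(arg);     // inlined mh1 adapter
  } else {
    result = alternative_two(arg);     // inlined mh2 adapter
  }
  // both branches merge here, the shared "end" continuation block
  return result;
}

int main() {
  std::printf("%d %d\n", select_alternative_inlined(true, 10),
                         select_alternative_inlined(false, 10));  // 11 9
  return 0;
}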
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -315,9 +315,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
                               ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                 return_prev,
                                                                                                 return_state); }
  void set_inline_cleanup_info() {
    set_inline_cleanup_info(_block, _last, _state);
  }
  BlockBegin*  inline_cleanup_block() const       { return scope_data()->inline_cleanup_block();       }
  Instruction* inline_cleanup_return_prev() const { return scope_data()->inline_cleanup_return_prev(); }
  ValueStack*  inline_cleanup_state() const       { return scope_data()->inline_cleanup_state();       }
  void restore_inline_cleanup_info() {
    _block = inline_cleanup_block();
    _last  = inline_cleanup_return_prev();
    _state = inline_cleanup_state();
  }
  void incr_num_returns()                         { scope_data()->incr_num_returns();                  }
  int  num_returns() const                        { return scope_data()->num_returns();                }
  intx max_inline_size() const                    { return scope_data()->max_inline_size();            }

@ -329,11 +337,15 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
  void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

  // inliners
  bool try_inline(ciMethod* callee, bool holder_known);
  bool try_inline(           ciMethod* callee, bool holder_known);
  bool try_inline_intrinsics(ciMethod* callee);
  bool try_inline_full      (ciMethod* callee, bool holder_known);
  bool try_inline_full(      ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
  bool try_inline_jsr(int jsr_dest_bci);

  // JSR 292 support
  bool for_method_handle_inline(ciMethod* callee);
  bool for_invokedynamic_inline(ciMethod* callee);

  // helpers
  void inline_bailout(const char* msg);
  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);

@ -514,28 +514,17 @@ Constant::CompareResult Constant::compare(Instruction::Condition cond, Value rig

void BlockBegin::set_end(BlockEnd* end) {
  assert(end != NULL, "should not reset block end to NULL");
  BlockEnd* old_end = _end;
  if (end == old_end) {
  if (end == _end) {
    return;
  }
  // Must make the predecessors/successors match up with the
  // BlockEnd's notion.
  int i, n;
  if (old_end != NULL) {
    // disconnect from the old end
    old_end->set_begin(NULL);
  clear_end();

    // disconnect this block from its current successors
    for (i = 0; i < _successors.length(); i++) {
      _successors.at(i)->remove_predecessor(this);
    }
  }
  // Set the new end
  _end = end;

  _successors.clear();
  // Now reset successors list based on BlockEnd
  n = end->number_of_sux();
  for (i = 0; i < n; i++) {
  for (int i = 0; i < end->number_of_sux(); i++) {
    BlockBegin* sux = end->sux_at(i);
    _successors.append(sux);
    sux->_predecessors.append(this);

@ -544,6 +533,22 @@ void BlockBegin::set_end(BlockEnd* end) {
}


void BlockBegin::clear_end() {
  // Must make the predecessors/successors match up with the
  // BlockEnd's notion.
  if (_end != NULL) {
    // disconnect from the old end
    _end->set_begin(NULL);

    // disconnect this block from its current successors
    for (int i = 0; i < _successors.length(); i++) {
      _successors.at(i)->remove_predecessor(this);
    }
    _end = NULL;
  }
}


void BlockBegin::disconnect_edge(BlockBegin* from, BlockBegin* to) {
  // disconnect any edges between from and to
#ifndef PRODUCT

@ -1601,6 +1601,7 @@ LEAF(BlockBegin, StateSplit)
  void set_depth_first_number(int dfn)  { _depth_first_number = dfn; }
  void set_linear_scan_number(int lsn)  { _linear_scan_number = lsn; }
  void set_end(BlockEnd* end);
  void clear_end();
  void disconnect_from_graph();
  static void disconnect_edge(BlockBegin* from, BlockBegin* to);
  BlockBegin* insert_block_between(BlockBegin* sux);
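The refactoring above pulls the disconnect logic out of set_end() into a new clear_end() so the CFG's predecessor/successor lists stay consistent whenever a block end is replaced or discarded. A minimal standalone sketch of the invariant being maintained, using a toy CFG node rather than the C1 classes:

#include <algorithm>
#include <vector>

// Toy CFG node illustrating the invariant set_end()/clear_end() maintain:
// block->successors and successor->predecessors must always agree.
struct Block {
  std::vector<Block*> successors;
  std::vector<Block*> predecessors;

  // clear_end() analogue: drop this block's out-edges and unregister
  // this block from each former successor's predecessor list.
  void clear_end() {
    for (Block* s : successors) {
      auto& p = s->predecessors;
      p.erase(std::remove(p.begin(), p.end(), this), p.end());
    }
    successors.clear();
  }

  // set_end() analogue: first disconnect, then wire up the new successors.
  void set_end(const std::vector<Block*>& new_sux) {
    clear_end();
    for (Block* s : new_sux) {
      successors.push_back(s);
      s->predecessors.push_back(this);
    }
  }
};

int main() {
  Block a, b, c;
  a.set_end({&b});   // a -> b
  a.set_end({&c});   // rewire: b's predecessor list is cleaned up first
  return (b.predecessors.empty() && c.predecessors.size() == 1) ? 0 : 1;
}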
@ -121,7 +121,7 @@ void LIR_Assembler::append_patching_stub(PatchingStub* stub) {

void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}
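The change above doubles the code-space safety margin on 64-bit targets, where individual instructions and embedded constants tend to be larger. A hedged standalone sketch of the same idea without the HotSpot NOT_LP64/LP64_ONLY macros, keying the threshold off the pointer size instead:

#include <cstddef>

// Reserve a larger slack on 64-bit builds, mirroring the intent of
// NOT_LP64(1*K) LP64_ONLY(2*K) in the hunk above (K = 1024 here).
constexpr std::size_t K = 1024;
constexpr std::size_t code_space_slack = (sizeof(void*) == 8) ? 2 * K : 1 * K;

// Returns true when a code buffer with 'remaining' bytes left should bail out.
bool should_bail_out(std::size_t remaining) {
  return remaining < code_space_slack;
}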
@ -133,7 +133,6 @@ class LIR_Assembler: public CompilationResourceObj {
  static bool is_small_constant(LIR_Opr opr);

  static LIR_Opr receiverOpr();
  static LIR_Opr incomingReceiverOpr();
  static LIR_Opr osrBufferPointer();

  // stubs

@ -2404,7 +2404,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
      assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");

      VMReg name = vm_reg_for_interval(interval);
      map->set_oop(name);
      set_oop(map, name);

      // Spill optimization: when the stack value is guaranteed to be always correct,
      // then it must be added to the oop map even if the interval is currently in a register

@ -2415,7 +2415,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
        assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
        assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");

        map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
        set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
      }
    }
  }

@ -2424,7 +2424,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
  assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
  int locks_count = info->stack()->total_locks_size();
  for (int i = 0; i < locks_count; i++) {
    map->set_oop(frame_map()->monitor_object_regname(i));
    set_oop(map, frame_map()->monitor_object_regname(i));
  }

  return map;

@ -352,6 +352,13 @@ class LinearScan : public CompilationResourceObj {

  MonitorValue*  location_for_monitor_index(int monitor_index);
  LocationValue* location_for_name(int name, Location::Type loc_type);
  void set_oop(OopMap* map, VMReg name) {
    if (map->legal_vm_reg_name(name)) {
      map->set_oop(name);
    } else {
      bailout("illegal oopMap register name");
    }
  }

  int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
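All three call sites above are funneled through the new set_oop() helper, which validates the register name and turns an illegal one into a compilation bailout instead of a crash deeper in the oop map code. A small standalone sketch of this validate-or-bail wrapper pattern, with toy types rather than the C1 API:

#include <set>
#include <stdexcept>

// Toy model of the guarded-setter pattern: validate the argument first and
// convert a bad input into a recoverable failure instead of corrupting state.
struct OopMapModel {
  std::set<int> legal_names{0, 1, 2, 3};
  std::set<int> oops;

  bool legal_vm_reg_name(int name) const { return legal_names.count(name) != 0; }
  void set_oop_raw(int name) { oops.insert(name); }  // unchecked primitive
};

// Analogue of LinearScan::set_oop(): the only entry point callers should use.
void set_oop(OopMapModel& map, int name) {
  if (map.legal_vm_reg_name(name)) {
    map.set_oop_raw(name);
  } else {
    throw std::runtime_error("illegal oopMap register name");  // bailout analogue
  }
}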
@ -375,14 +375,6 @@ JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDes
JRT_END


JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(thread, true);
    address bcp = vfst.method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
  }
JRT_END

// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee (that is possibly included in the enclosing
// method) method oop is passed as an argument. In order to do that it is embedded in the code as

@ -65,7 +65,6 @@ class StubAssembler;
  stub(monitorexit_nofpu)  /* optimized version that does not preserve fpu registers */ \
  stub(access_field_patching) \
  stub(load_klass_patching) \
  stub(jvmti_exception_throw) \
  stub(g1_pre_barrier_slow) \
  stub(g1_post_barrier_slow) \
  stub(fpu2long_stub) \

@ -141,7 +140,6 @@ class Runtime1: public AllStatic {
  static void unimplemented_entry(JavaThread* thread, StubID id);

  static address exception_handler_for_pc(JavaThread* thread);
  static void post_jvmti_exception_throw(JavaThread* thread);

  static void throw_range_check_exception(JavaThread* thread, int index);
  static void throw_index_exception(JavaThread* thread, int index);

@ -278,7 +278,7 @@
  product(intx, CompilationRepeat, 0, \
          "Number of times to recompile method before returning result") \
  \
  develop(intx, NMethodSizeLimit, (32*K)*wordSize, \
  develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
          "Maximum size of a compiled method.") \
  \
  develop(bool, TraceFPUStack, false, \

@ -79,6 +79,17 @@ public:
    assert(i < _limit, "out of Call Profile MorphismLimit");
    return _receiver[i];
  }

  // Rescale the current profile based on the incoming scale
  ciCallProfile rescale(double scale) {
    assert(scale >= 0 && scale <= 1.0, "out of range");
    ciCallProfile call = *this;
    call._count = (int)(call._count * scale);
    for (int i = 0; i < _morphism; i++) {
      call._receiver_count[i] = (int)(call._receiver_count[i] * scale);
    }
    return call;
  }
};

#endif // SHARE_VM_CI_CICALLPROFILE_HPP
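rescale() shrinks a call profile proportionally, for example when only a fraction of the recorded invocations should be attributed to one branch of a guard. A quick worked example of the arithmetic, with made-up counts:

#include <cstdio>
#include <vector>

// Worked example of the rescale() arithmetic with illustrative numbers:
// total count 1000 and two receiver counts, scaled by 0.25.
int main() {
  int count = 1000;
  std::vector<int> receiver_count = {800, 200};
  double scale = 0.25;

  count = (int)(count * scale);                           // 1000 * 0.25 = 250
  for (int& rc : receiver_count) rc = (int)(rc * scale);  // 800 -> 200, 200 -> 50

  std::printf("count=%d receivers=%d,%d\n", count, receiver_count[0], receiver_count[1]);
  return 0;
}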
@ -46,9 +46,6 @@ private:
    ciObject* _object;
  } _value;

  // Implementation of the print method.
  void print_impl(outputStream* st);

public:

  ciConstant() {

@ -884,19 +884,31 @@ bool ciEnv::system_dictionary_modification_counter_changed() {
}

// ------------------------------------------------------------------
// ciEnv::check_for_system_dictionary_modification
// Check for changes to the system dictionary during compilation
// class loads, evolution, breakpoints
void ciEnv::check_for_system_dictionary_modification(ciMethod* target) {
// ciEnv::validate_compile_task_dependencies
//
// Check for changes during compilation (e.g. class loads, evolution,
// breakpoints, call site invalidation).
void ciEnv::validate_compile_task_dependencies(ciMethod* target) {
  if (failing()) return;  // no need for further checks

  // Dependencies must be checked when the system dictionary changes.
  // If logging is enabled all violated dependences will be recorded in
  // the log. In debug mode check dependencies even if the system
  // dictionary hasn't changed to verify that no invalid dependencies
  // were inserted. Any violated dependences in this case are dumped to
  // the tty.
  // First, check non-klass dependencies as we might return early and
  // not check klass dependencies if the system dictionary
  // modification counter hasn't changed (see below).
  for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
    if (deps.is_klass_type())  continue;  // skip klass dependencies
    klassOop witness = deps.check_dependency();
    if (witness != NULL) {
      record_failure("invalid non-klass dependency");
      return;
    }
  }

  // Klass dependencies must be checked when the system dictionary
  // changes. If logging is enabled all violated dependences will be
  // recorded in the log. In debug mode check dependencies even if
  // the system dictionary hasn't changed to verify that no invalid
  // dependencies were inserted. Any violated dependences in this
  // case are dumped to the tty.
  bool counter_changed = system_dictionary_modification_counter_changed();
  bool test_deps = counter_changed;
  DEBUG_ONLY(test_deps = true);

@ -904,22 +916,21 @@ void ciEnv::check_for_system_dictionary_modification(ciMethod* target) {

  bool print_failures = false;
  DEBUG_ONLY(print_failures = !counter_changed);

  bool keep_going = (print_failures || xtty != NULL);

  int violated = 0;
  int klass_violations = 0;

  for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
    if (!deps.is_klass_type())  continue;  // skip non-klass dependencies
    klassOop witness = deps.check_dependency();
    if (witness != NULL) {
      ++violated;
      klass_violations++;
      if (print_failures)  deps.print_dependency(witness, /*verbose=*/ true);
    }
    // If there's no log and we're not sanity-checking, we're done.
    if (!keep_going)  break;
  }
  }

  if (violated != 0) {
  if (klass_violations != 0) {
    assert(counter_changed, "failed dependencies, but counter didn't change");
    record_failure("concurrent class loading");
  }
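The rewritten check splits validation into two passes: non-klass dependencies (such as call-site targets) are always checked, while the more numerous klass dependencies only need re-verification when the system dictionary modification counter says classes may have been loaded concurrently. A standalone sketch of that two-pass structure, with toy dependency records standing in for DepStream:

#include <string>
#include <vector>

// Toy dependency record for illustrating the two-pass validation above.
struct Dep {
  bool is_klass_type;
  bool violated;     // stands in for check_dependency() finding a witness
};

// Returns an empty string on success, otherwise the failure reason.
std::string validate(const std::vector<Dep>& deps, bool counter_changed) {
  // Pass 1: non-klass dependencies are always checked.
  for (const Dep& d : deps) {
    if (d.is_klass_type) continue;
    if (d.violated) return "invalid non-klass dependency";
  }
  // Pass 2: klass dependencies only need re-checking if classes may have
  // been loaded concurrently (the modification counter changed).
  if (counter_changed) {
    for (const Dep& d : deps) {
      if (!d.is_klass_type) continue;
      if (d.violated) return "concurrent class loading";
    }
  }
  return "";
}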
@ -938,7 +949,6 @@ void ciEnv::register_method(ciMethod* target,
                            ImplicitExceptionTable* inc_table,
                            AbstractCompiler* compiler,
                            int comp_level,
                            bool has_debug_info,
                            bool has_unsafe_access) {
  VM_ENTRY_MARK;
  nmethod* nm = NULL;

@ -978,8 +988,8 @@ void ciEnv::register_method(ciMethod* target,
    // Encode the dependencies now, so we can check them right away.
    dependencies()->encode_content_bytes();

    // Check for {class loads, evolution, breakpoints} during compilation
    check_for_system_dictionary_modification(target);
    // Check for {class loads, evolution, breakpoints, ...} during compilation
    validate_compile_task_dependencies(target);
  }

  methodHandle method(THREAD, target->get_methodOop());

@ -1033,7 +1043,6 @@ void ciEnv::register_method(ciMethod* target,
      CompileBroker::handle_full_code_cache();
    }
  } else {
    NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
    nm->set_has_unsafe_access(has_unsafe_access);

    // Record successful registration.

@ -247,9 +247,9 @@ private:
  // Is this thread currently in the VM state?
  static bool is_in_vm();

  // Helper routine for determining the validity of a compilation
  // with respect to concurrent class loading.
  void check_for_system_dictionary_modification(ciMethod* target);
  // Helper routine for determining the validity of a compilation with
  // respect to method dependencies (e.g. concurrent class loading).
  void validate_compile_task_dependencies(ciMethod* target);

public:
  enum {

@ -317,8 +317,7 @@ public:
                       ImplicitExceptionTable* inc_table,
                       AbstractCompiler* compiler,
                       int comp_level,
                       bool has_debug_info = true,
                       bool has_unsafe_access = false);
                       bool has_unsafe_access);


  // Access to certain well known ciObjects.

@ -64,9 +64,6 @@ private:
  // shared constructor code
  void initialize_from(fieldDescriptor* fd);

  // The implementation of the print method.
  void print_impl(outputStream* st);

public:
  ciFlags flags() { return _flags; }

@ -178,7 +175,12 @@ public:
  bool is_volatile () { return flags().is_volatile(); }
  bool is_transient() { return flags().is_transient(); }

  bool is_call_site_target() { return ((holder() == CURRENT_ENV->CallSite_klass()) && (name() == ciSymbol::target_name())); }
  bool is_call_site_target() {
    ciInstanceKlass* callsite_klass = CURRENT_ENV->CallSite_klass();
    if (callsite_klass == NULL)
      return false;
    return (holder()->is_subclass_of(callsite_klass) && (name() == ciSymbol::target_name()));
  }

  // Debugging output
  void print();

@ -1016,6 +1016,34 @@ int ciMethod::highest_osr_comp_level() {
  return get_methodOop()->highest_osr_comp_level();
}

// ------------------------------------------------------------------
// ciMethod::code_size_for_inlining
//
// Code size for inlining decisions.
//
// Don't fully count method handle adapters against inlining budgets:
// the metric we use here is the number of call sites in the adapter
// as they are probably the instructions which generate some code.
int ciMethod::code_size_for_inlining() {
  check_is_loaded();

  // Method handle adapters
  if (is_method_handle_adapter()) {
    // Count call sites
    int call_site_count = 0;
    ciBytecodeStream iter(this);
    while (iter.next() != ciBytecodeStream::EOBC()) {
      if (Bytecodes::is_invoke(iter.cur_bc())) {
        call_site_count++;
      }
    }
    return call_site_count;
  }

  // Normal method
  return code_size();
}

// ------------------------------------------------------------------
// ciMethod::instructions_size
//
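code_size_for_inlining() charges a method handle adapter for its invoke bytecodes only, since most of the surrounding plumbing compiles away. A standalone sketch of the counting loop over a toy bytecode array; the opcodes here are invented for illustration, not real JVM bytecodes:

#include <cstdio>
#include <vector>

// Invented opcode set for illustration only.
enum class Op { load, store, add, invoke, ret };

// Analogue of the loop in ciMethod::code_size_for_inlining(): the "size"
// of an adapter is just the number of invoke-style instructions in it.
int call_site_count(const std::vector<Op>& bytecodes) {
  int count = 0;
  for (Op op : bytecodes) {
    if (op == Op::invoke) count++;
  }
  return count;
}

int main() {
  std::vector<Op> adapter = {Op::load, Op::load, Op::invoke, Op::store,
                             Op::invoke, Op::ret};
  std::printf("inlining size = %d\n", call_site_count(adapter));  // 2, not 6
  return 0;
}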
@ -157,6 +157,9 @@ class ciMethod : public ciObject {
  int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
  int interpreter_throwout_count() const   { check_is_loaded(); return _interpreter_throwout_count; }

  // Code size for inlining decisions.
  int code_size_for_inlining();

  int comp_level();
  int highest_osr_comp_level();

@ -37,7 +37,7 @@
// ciMethodHandle::get_adapter
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) {
  VM_ENTRY_MARK;
  Handle h(get_oop());
  methodHandle callee(_callee->get_methodOop());

@ -73,7 +73,7 @@ ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
// ciMethodHandle::get_adapter
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) {
  ciMethod* result = get_adapter_impl(is_invokedynamic);
  if (result) {
    // Fake up the MDO maturity.

@ -86,11 +86,22 @@ ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
}


#ifndef PRODUCT
// ------------------------------------------------------------------
// ciMethodHandle::print_impl
// ciMethodHandle::print_chain_impl
//
// Implementation of the print method.
void ciMethodHandle::print_impl(outputStream* st) {
  st->print(" type=");
  get_oop()->print();
void ciMethodHandle::print_chain_impl(outputStream* st) {
  ASSERT_IN_VM;
  MethodHandleChain::print(get_oop());
}


// ------------------------------------------------------------------
// ciMethodHandle::print_chain
//
// Implementation of the print_chain method.
void ciMethodHandle::print_chain(outputStream* st) {
  GUARDED_VM_ENTRY(print_chain_impl(st););
}
#endif

@ -37,19 +37,23 @@ private:
  ciMethod*     _callee;
  ciMethod*     _caller;
  ciCallProfile _profile;
  ciMethod*     _method_handle_adapter;
  ciMethod*     _invokedynamic_adapter;

  // Return an adapter for this MethodHandle.
  ciMethod* get_adapter_impl(bool is_invokedynamic) const;
  ciMethod* get_adapter(     bool is_invokedynamic) const;
  ciMethod* get_adapter_impl(bool is_invokedynamic);
  ciMethod* get_adapter(     bool is_invokedynamic);

protected:
  void print_impl(outputStream* st);
  void print_chain_impl(outputStream* st) PRODUCT_RETURN;

public:
  ciMethodHandle(instanceHandle h_i) :
    ciInstance(h_i),
    _callee(NULL),
    _caller(NULL)
    _caller(NULL),
    _method_handle_adapter(NULL),
    _invokedynamic_adapter(NULL)
  {}

  // What kind of ciObject is this?

@ -60,10 +64,22 @@ public:
  void set_call_profile(ciCallProfile profile) { _profile = profile; }

  // Return an adapter for a MethodHandle call.
  ciMethod* get_method_handle_adapter() const { return get_adapter(false); }
  ciMethod* get_method_handle_adapter() {
    if (_method_handle_adapter == NULL) {
      _method_handle_adapter = get_adapter(false);
    }
    return _method_handle_adapter;
  }

  // Return an adapter for an invokedynamic call.
  ciMethod* get_invokedynamic_adapter() const { return get_adapter(true); }
  ciMethod* get_invokedynamic_adapter() {
    if (_invokedynamic_adapter == NULL) {
      _invokedynamic_adapter = get_adapter(true);
    }
    return _invokedynamic_adapter;
  }

  void print_chain(outputStream* st = tty) PRODUCT_RETURN;
};

#endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
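The accessors above now memoize the adapter: the first call computes it via get_adapter() and stores it in a field, so each MethodHandle is compiled into an adapter at most once per ciMethodHandle. A minimal standalone sketch of this lazy-initialization pattern; it is deliberately single-threaded, matching the compiler-private use here:

// Minimal sketch of the memoizing accessor pattern used above
// (single-threaded on purpose; the ci* objects are compiler-private).
struct Adapter { int id; };

struct MethodHandleModel {
  Adapter* _cached = nullptr;

  Adapter* compute_adapter() {   // stands in for get_adapter_impl()
    return new Adapter{42};      // the expensive work happens only once
  }

  Adapter* get_adapter() {       // memoized public accessor
    if (_cached == nullptr) {
      _cached = compute_adapter();
    }
    return _cached;              // later calls are free
  }
};

int main() {
  MethodHandleModel mh;
  Adapter* a = mh.get_adapter();
  Adapter* b = mh.get_adapter();  // same object, no recomputation
  bool cached_ok = (a == b);
  delete a;
  return cached_ok ? 0 : 1;
}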
|
@ -194,16 +194,26 @@ bool ciObject::can_be_constant() {
|
|||
// ciObject::should_be_constant()
|
||||
bool ciObject::should_be_constant() {
|
||||
if (ScavengeRootsInCode >= 2) return true; // force everybody to be a constant
|
||||
if (!JavaObjectsInPerm && !is_null_object()) {
|
||||
if (is_null_object()) return true;
|
||||
|
||||
ciEnv* env = CURRENT_ENV;
|
||||
if (!JavaObjectsInPerm) {
|
||||
// We want Strings and Classes to be embeddable by default since
|
||||
// they used to be in the perm world. Not all Strings used to be
|
||||
// embeddable but there's no easy way to distinguish the interned
|
||||
// from the regulars ones so just treat them all that way.
|
||||
ciEnv* env = CURRENT_ENV;
|
||||
if (klass() == env->String_klass() || klass() == env->Class_klass()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if (EnableInvokeDynamic &&
|
||||
(klass()->is_subclass_of(env->MethodHandle_klass()) ||
|
||||
klass()->is_subclass_of(env->CallSite_klass()))) {
|
||||
assert(ScavengeRootsInCode >= 1, "must be");
|
||||
// We want to treat these aggressively.
|
||||
return true;
|
||||
}
|
||||
|
||||
return handle() == NULL || is_perm();
|
||||
}
|
||||
|
||||
|
|
|
@ -129,7 +129,8 @@ public:
|
|||
// Return current ByteCode and increment PC to next bytecode, skipping all
|
||||
// intermediate constants. Returns EOBC at end.
|
||||
// Expected usage:
|
||||
// while( (bc = iter.next()) != EOBC() ) { ... }
|
||||
// ciBytecodeStream iter(m);
|
||||
// while (iter.next() != ciBytecodeStream::EOBC()) { ... }
|
||||
Bytecodes::Code next() {
|
||||
_bc_start = _pc; // Capture start of bc
|
||||
if( _pc >= _end ) return EOBC(); // End-Of-Bytecodes
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/debugInfo.hpp"
|
||||
#include "code/pcDesc.hpp"
|
||||
#include "compiler/compilerOracle.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/oopFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
|
@ -2323,6 +2324,8 @@ int java_lang_invoke_BoundMethodHandle::_vmargslot_offset;
|
|||
|
||||
int java_lang_invoke_AdapterMethodHandle::_conversion_offset;
|
||||
|
||||
int java_lang_invoke_CountingMethodHandle::_vmcount_offset;
|
||||
|
||||
void java_lang_invoke_MethodHandle::compute_offsets() {
|
||||
klassOop k = SystemDictionary::MethodHandle_klass();
|
||||
if (k != NULL && EnableInvokeDynamic) {
|
||||
|
@ -2371,6 +2374,23 @@ void java_lang_invoke_AdapterMethodHandle::compute_offsets() {
|
|||
}
|
||||
}
|
||||
|
||||
void java_lang_invoke_CountingMethodHandle::compute_offsets() {
|
||||
klassOop k = SystemDictionary::CountingMethodHandle_klass();
|
||||
if (k != NULL && EnableInvokeDynamic) {
|
||||
compute_offset(_vmcount_offset, k, vmSymbols::vmcount_name(), vmSymbols::int_signature(), true);
|
||||
}
|
||||
}
|
||||
|
||||
int java_lang_invoke_CountingMethodHandle::vmcount(oop mh) {
|
||||
assert(is_instance(mh), "CMH only");
|
||||
return mh->int_field(_vmcount_offset);
|
||||
}
|
||||
|
||||
void java_lang_invoke_CountingMethodHandle::set_vmcount(oop mh, int count) {
|
||||
assert(is_instance(mh), "CMH only");
|
||||
mh->int_field_put(_vmcount_offset, count);
|
||||
}
|
||||
|
||||
oop java_lang_invoke_MethodHandle::type(oop mh) {
|
||||
return mh->obj_field(_type_offset);
|
||||
}
|
||||
|
@ -2674,6 +2694,17 @@ void java_lang_invoke_CallSite::compute_offsets() {
|
|||
if (k != NULL) {
|
||||
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
|
||||
}
|
||||
|
||||
// Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
|
||||
// (For C2: keep this until we have throttling logic for uncommon traps.)
|
||||
if (k != NULL) {
|
||||
instanceKlass* ik = instanceKlass::cast(k);
|
||||
methodOop m_normal = ik->lookup_method(vmSymbols::setTargetNormal_name(), vmSymbols::setTarget_signature());
|
||||
methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
|
||||
guarantee(m_normal && m_volatile, "must exist");
|
||||
m_normal->set_not_compilable_quietly();
|
||||
m_volatile->set_not_compilable_quietly();
|
||||
}
|
||||
}
|
||||
|
||||
oop java_lang_invoke_CallSite::target(oop site) {
|
||||
|
@ -3031,6 +3062,7 @@ void JavaClasses::compute_offsets() {
|
|||
java_lang_invoke_MethodType::compute_offsets();
|
||||
java_lang_invoke_MethodTypeForm::compute_offsets();
|
||||
java_lang_invoke_CallSite::compute_offsets();
|
||||
java_lang_invoke_CountingMethodHandle::compute_offsets();
|
||||
}
|
||||
java_security_AccessControlContext::compute_offsets();
|
||||
// Initialize reflection classes. The layouts of these classes
|
||||
|
|
|
@ -981,6 +981,34 @@ class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodH
|
|||
};
|
||||
|
||||
|
||||
// A simple class that maintains an invocation count
|
||||
class java_lang_invoke_CountingMethodHandle: public java_lang_invoke_MethodHandle {
|
||||
friend class JavaClasses;
|
||||
|
||||
private:
|
||||
static int _vmcount_offset;
|
||||
static void compute_offsets();
|
||||
|
||||
public:
|
||||
// Accessors
|
||||
static int vmcount(oop mh);
|
||||
static void set_vmcount(oop mh, int count);
|
||||
|
||||
// Testers
|
||||
static bool is_subclass(klassOop klass) {
|
||||
return SystemDictionary::CountingMethodHandle_klass() != NULL &&
|
||||
Klass::cast(klass)->is_subclass_of(SystemDictionary::CountingMethodHandle_klass());
|
||||
}
|
||||
static bool is_instance(oop obj) {
|
||||
return obj != NULL && is_subclass(obj->klass());
|
||||
}
|
||||
|
||||
// Accessors for code generation:
|
||||
static int vmcount_offset_in_bytes() { return _vmcount_offset; }
|
||||
};
|
||||
|
||||
|
||||
|
||||
// Interface to java.lang.invoke.MemberName objects
|
||||
// (These are a private interface for Java code to query the class hierarchy.)
|
||||
|
||||
|
|
|
@ -155,6 +155,7 @@ class SymbolPropertyTable;
|
|||
template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \
|
||||
template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
|
||||
template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
|
||||
template(CountingMethodHandle_klass, java_lang_invoke_CountingMethodHandle, Opt) \
|
||||
template(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292) \
|
||||
template(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292) \
|
||||
template(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292) \
|
||||
|
|
|
@ -218,6 +218,7 @@
|
|||
template(returnType_name, "returnType") \
|
||||
template(signature_name, "signature") \
|
||||
template(slot_name, "slot") \
|
||||
template(selectAlternative_name, "selectAlternative") \
|
||||
\
|
||||
/* Support for annotations (JDK 1.5 and above) */ \
|
||||
\
|
||||
|
@ -246,9 +247,11 @@
|
|||
template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \
|
||||
template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \
|
||||
template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \
|
||||
template(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
|
||||
template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \
|
||||
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
|
||||
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
|
||||
template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \
|
||||
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
|
||||
template(findMethodHandleType_name, "findMethodHandleType") \
|
||||
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
|
||||
|
@ -258,8 +261,12 @@
|
|||
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
|
||||
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
|
||||
template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
|
||||
template(setTargetNormal_name, "setTargetNormal") \
|
||||
template(setTargetVolatile_name, "setTargetVolatile") \
|
||||
template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
|
||||
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
|
||||
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
|
||||
template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
|
||||
\
|
||||
/* common method and field names */ \
|
||||
template(object_initializer_name, "<init>") \
|
||||
|
@ -344,6 +351,7 @@
|
|||
template(vmmethod_name, "vmmethod") \
|
||||
template(vmtarget_name, "vmtarget") \
|
||||
template(vmentry_name, "vmentry") \
|
||||
template(vmcount_name, "vmcount") \
|
||||
template(vmslots_name, "vmslots") \
|
||||
template(vmlayout_name, "vmlayout") \
|
||||
template(vmindex_name, "vmindex") \
|
||||
|
@ -907,6 +915,8 @@
|
|||
do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \
|
||||
do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \
|
||||
\
|
||||
do_intrinsic(_selectAlternative, java_lang_invoke_MethodHandleImpl, selectAlternative_name, selectAlternative_signature, F_S) \
|
||||
\
|
||||
/* unboxing methods: */ \
|
||||
do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
|
||||
do_name( booleanValue_name, "booleanValue") \
|
||||
|
|
|
@ -113,9 +113,9 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
|
|||
assert_common_1(no_finalizable_subclasses, ctxk);
|
||||
}
|
||||
|
||||
void Dependencies::assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle) {
|
||||
check_ctxk(ctxk);
|
||||
assert_common_3(call_site_target_value, ctxk, call_site, method_handle);
|
||||
void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) {
|
||||
check_ctxk(call_site->klass());
|
||||
assert_common_2(call_site_target_value, call_site, method_handle);
|
||||
}
|
||||
|
||||
// Helper function. If we are adding a new dep. under ctxk2,
|
||||
|
@ -135,7 +135,7 @@ bool Dependencies::maybe_merge_ctxk(GrowableArray<ciObject*>* deps,
|
|||
}
|
||||
}
|
||||
|
||||
void Dependencies::assert_common_1(Dependencies::DepType dept, ciObject* x) {
|
||||
void Dependencies::assert_common_1(DepType dept, ciObject* x) {
|
||||
assert(dep_args(dept) == 1, "sanity");
|
||||
log_dependency(dept, x);
|
||||
GrowableArray<ciObject*>* deps = _deps[dept];
|
||||
|
@ -148,21 +148,37 @@ void Dependencies::assert_common_1(Dependencies::DepType dept, ciObject* x) {
|
|||
}
|
||||
}
|
||||
|
||||
void Dependencies::assert_common_2(Dependencies::DepType dept,
|
||||
ciKlass* ctxk, ciObject* x) {
|
||||
assert(dep_context_arg(dept) == 0, "sanity");
|
||||
void Dependencies::assert_common_2(DepType dept,
|
||||
ciObject* x0, ciObject* x1) {
|
||||
assert(dep_args(dept) == 2, "sanity");
|
||||
log_dependency(dept, ctxk, x);
|
||||
log_dependency(dept, x0, x1);
|
||||
GrowableArray<ciObject*>* deps = _deps[dept];
|
||||
|
||||
// see if the same (or a similar) dep is already recorded
|
||||
if (note_dep_seen(dept, x)) {
|
||||
bool has_ctxk = has_explicit_context_arg(dept);
|
||||
if (has_ctxk) {
|
||||
assert(dep_context_arg(dept) == 0, "sanity");
|
||||
if (note_dep_seen(dept, x1)) {
|
||||
// look in this bucket for redundant assertions
|
||||
const int stride = 2;
|
||||
for (int i = deps->length(); (i -= stride) >= 0; ) {
|
||||
ciObject* x1 = deps->at(i+1);
|
||||
if (x == x1) { // same subject; check the context
|
||||
if (maybe_merge_ctxk(deps, i+0, ctxk)) {
|
||||
ciObject* y1 = deps->at(i+1);
|
||||
if (x1 == y1) { // same subject; check the context
|
||||
if (maybe_merge_ctxk(deps, i+0, x0->as_klass())) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert(dep_implicit_context_arg(dept) == 0, "sanity");
|
||||
if (note_dep_seen(dept, x0) && note_dep_seen(dept, x1)) {
|
||||
// look in this bucket for redundant assertions
|
||||
const int stride = 2;
|
||||
for (int i = deps->length(); (i -= stride) >= 0; ) {
|
||||
ciObject* y0 = deps->at(i+0);
|
||||
ciObject* y1 = deps->at(i+1);
|
||||
if (x0 == y0 && x1 == y1) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -170,11 +186,11 @@ void Dependencies::assert_common_2(Dependencies::DepType dept,
|
|||
}
|
||||
|
||||
// append the assertion in the correct bucket:
|
||||
deps->append(ctxk);
|
||||
deps->append(x);
|
||||
deps->append(x0);
|
||||
deps->append(x1);
|
||||
}
|
||||
|
||||
void Dependencies::assert_common_3(Dependencies::DepType dept,
|
||||
void Dependencies::assert_common_3(DepType dept,
|
||||
ciKlass* ctxk, ciObject* x, ciObject* x2) {
|
||||
assert(dep_context_arg(dept) == 0, "sanity");
|
||||
assert(dep_args(dept) == 3, "sanity");
|
||||
|
@ -361,7 +377,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
|
|||
3, // unique_concrete_subtypes_2 ctxk, k1, k2
|
||||
3, // unique_concrete_methods_2 ctxk, m1, m2
|
||||
1, // no_finalizable_subclasses ctxk
|
||||
3 // call_site_target_value ctxk, call_site, method_handle
|
||||
2 // call_site_target_value call_site, method_handle
|
||||
};
|
||||
|
||||
const char* Dependencies::dep_name(Dependencies::DepType dept) {
|
||||
|
@ -375,10 +391,7 @@ int Dependencies::dep_args(Dependencies::DepType dept) {
|
|||
}
|
||||
|
||||
void Dependencies::check_valid_dependency_type(DepType dept) {
|
||||
for (int deptv = (int) FIRST_TYPE; deptv < (int) TYPE_LIMIT; deptv++) {
|
||||
if (dept == ((DepType) deptv)) return;
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, err_msg("invalid dependency type: %d", (int) dept));
|
||||
}
|
||||
|
||||
// for the sake of the compiler log, print out current dependencies:
|
||||
|
@ -586,8 +599,7 @@ bool Dependencies::DepStream::next() {
|
|||
code_byte -= ctxk_bit;
|
||||
DepType dept = (DepType)code_byte;
|
||||
_type = dept;
|
||||
guarantee((dept - FIRST_TYPE) < (TYPE_LIMIT - FIRST_TYPE),
|
||||
"bad dependency type tag");
|
||||
Dependencies::check_valid_dependency_type(dept);
|
||||
int stride = _dep_args[dept];
|
||||
assert(stride == dep_args(dept), "sanity");
|
||||
int skipj = -1;
|
||||
|
@ -615,18 +627,35 @@ oop Dependencies::DepStream::argument(int i) {
|
|||
|
||||
klassOop Dependencies::DepStream::context_type() {
|
||||
assert(must_be_in_vm(), "raw oops here");
|
||||
int ctxkj = dep_context_arg(_type); // -1 if no context arg
|
||||
if (ctxkj < 0) {
|
||||
return NULL; // for example, evol_method
|
||||
} else {
|
||||
oop k = recorded_oop_at(_xi[ctxkj]);
|
||||
|
||||
// Most dependencies have an explicit context type argument.
|
||||
{
|
||||
int ctxkj = dep_context_arg(_type); // -1 if no explicit context arg
|
||||
if (ctxkj >= 0) {
|
||||
oop k = argument(ctxkj);
|
||||
if (k != NULL) { // context type was not compressed away
|
||||
assert(k->is_klass(), "type check");
|
||||
return (klassOop) k;
|
||||
} else { // recompute "default" context type
|
||||
return ctxk_encoded_as_null(_type, recorded_oop_at(_xi[ctxkj+1]));
|
||||
}
|
||||
// recompute "default" context type
|
||||
return ctxk_encoded_as_null(_type, argument(ctxkj+1));
|
||||
}
|
||||
}
|
||||
|
||||
// Some dependencies are using the klass of the first object
|
||||
// argument as implicit context type (e.g. call_site_target_value).
|
||||
{
|
||||
int ctxkj = dep_implicit_context_arg(_type);
|
||||
if (ctxkj >= 0) {
|
||||
oop k = argument(ctxkj)->klass();
|
||||
assert(k->is_klass(), "type check");
|
||||
return (klassOop) k;
|
||||
}
|
||||
}
|
||||
|
||||
// And some dependencies don't have a context type at all,
|
||||
// e.g. evol_method.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
 /// Checking dependencies:

@@ -1409,21 +1438,20 @@ klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, KlassD
 }

-klassOop Dependencies::check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) {
+klassOop Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
   assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
   assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
   if (changes == NULL) {
     // Validate all CallSites
     if (java_lang_invoke_CallSite::target(call_site) != method_handle)
-      return ctxk;  // assertion failed
+      return call_site->klass();  // assertion failed
   } else {
     // Validate the given CallSite
     if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
       assert(method_handle != changes->method_handle(), "must be");
-      return ctxk;  // assertion failed
+      return call_site->klass();  // assertion failed
     }
   }
   assert(java_lang_invoke_CallSite::target(call_site) == method_handle, "should still be valid");
   return NULL;  // assertion still valid
 }
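All check_* routines share one convention: NULL means the assertion still holds, while a non-NULL klassOop is a witness to its failure. With the ctxk parameter gone, the witness for a broken call-site dependency is now the call site's own klass. A hedged sketch of the convention with stand-in types:

    #include <cstdio>

    struct Klass    { const char* name; };
    struct CallSite { Klass* klass; void* target; };

    // Returns NULL while the dependency holds; on failure returns a
    // "witness" klass identifying where it broke (here: the call site's).
    static Klass* check_call_site_target_value(CallSite* cs, void* expected) {
      if (cs->target != expected) return cs->klass;  // assertion failed
      return 0;                                      // still valid
    }

    int main() {
      Klass k = { "MutableCallSite" };
      int mh1, mh2;
      CallSite cs = { &k, &mh1 };
      printf("%p\n", (void*) check_call_site_target_value(&cs, &mh1));  // NULL: valid
      printf("%s\n", check_call_site_target_value(&cs, &mh2)->name);    // witness klass
    }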
@@ -1488,7 +1516,7 @@ klassOop Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange*
   klassOop witness = NULL;
   switch (type()) {
   case call_site_target_value:
-    witness = check_call_site_target_value(context_type(), argument(1), argument(2), changes);
+    witness = check_call_site_target_value(argument(0), argument(1), changes);
     break;
   default:
     witness = NULL;

@@ -166,9 +166,14 @@ class Dependencies: public ResourceObj {
     LG2_TYPE_LIMIT = 4,  // assert(TYPE_LIMIT <= (1<<LG2_TYPE_LIMIT))

     // handy categorizations of dependency types:
-    all_types      = ((1<<TYPE_LIMIT)-1) & ((-1)<<FIRST_TYPE),
-    non_ctxk_types = (1<<evol_method),
-    ctxk_types     = all_types & ~non_ctxk_types,
+    all_types           = ((1 << TYPE_LIMIT) - 1) & ((-1) << FIRST_TYPE),
+
+    non_klass_types     = (1 << call_site_target_value),
+    klass_types         = all_types & ~non_klass_types,
+
+    non_ctxk_types      = (1 << evol_method),
+    implicit_ctxk_types = (1 << call_site_target_value),
+    explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),

     max_arg_count = 3,   // current maximum number of arguments (incl. ctxk)

@@ -184,9 +189,15 @@ class Dependencies: public ResourceObj {

   static const char* dep_name(DepType dept);
   static int         dep_args(DepType dept);
-  static int  dep_context_arg(DepType dept) {
-    return dept_in_mask(dept, ctxk_types)? 0: -1;
-  }
+
+  static bool is_klass_type(           DepType dept) { return dept_in_mask(dept, klass_types        ); }
+
+  static bool has_explicit_context_arg(DepType dept) { return dept_in_mask(dept, explicit_ctxk_types); }
+  static bool has_implicit_context_arg(DepType dept) { return dept_in_mask(dept, implicit_ctxk_types); }
+
+  static int           dep_context_arg(DepType dept) { return has_explicit_context_arg(dept) ? 0 : -1; }
+  static int  dep_implicit_context_arg(DepType dept) { return has_implicit_context_arg(dept) ? 0 : -1; }
+
   static void check_valid_dependency_type(DepType dept);

 private:
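The categorizations above are bit masks over the DepType enum: each type contributes bit (1 << type), so "does this type carry an explicit or implicit context?" becomes a single AND. A self-contained sketch of the idiom (a trimmed, illustrative enum, not the full HotSpot list):

    #include <cstdio>

    enum DepType { evol_method, leaf_type, call_site_target_value, TYPE_LIMIT };

    enum {
      all_types           = (1 << TYPE_LIMIT) - 1,
      non_ctxk_types      = 1 << evol_method,             // no context at all
      implicit_ctxk_types = 1 << call_site_target_value,  // context from arg 0's klass
      explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),
    };

    static bool in_mask(DepType t, int mask) { return ((1 << t) & mask) != 0; }

    int main() {
      // Membership tests compile down to a shift and an AND.
      printf("%d\n", in_mask(leaf_type,              explicit_ctxk_types));  // 1
      printf("%d\n", in_mask(call_site_target_value, implicit_ctxk_types));  // 1
      printf("%d\n", in_mask(evol_method,            explicit_ctxk_types));  // 0
    }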
@@ -250,8 +261,8 @@ class Dependencies: public ResourceObj {
   }

   void assert_common_1(DepType dept, ciObject* x);
-  void assert_common_2(DepType dept, ciKlass* ctxk, ciObject* x);
-  void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x, ciObject* x2);
+  void assert_common_2(DepType dept, ciObject* x0, ciObject* x1);
+  void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x1, ciObject* x2);

 public:
   // Adding assertions to a new dependency set at compile time:

@@ -264,7 +275,7 @@ class Dependencies: public ResourceObj {
   void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
   void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
   void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
-  void assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle);
+  void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle);

   // Define whether a given method or type is concrete.
   // These methods define the term "concrete" as used in this module.

@@ -318,7 +329,7 @@ class Dependencies: public ResourceObj {
   static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
                                                    KlassDepChange* changes = NULL);
   static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
-  static klassOop check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
+  static klassOop check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
   // A returned klassOop is NULL if the dependency assertion is still
   // valid. A non-NULL klassOop is a 'witness' to the assertion
   // failure, a point in the class hierarchy where the assertion has

@@ -455,6 +466,8 @@ class Dependencies: public ResourceObj {
   oop argument(int i);         // => recorded_oop_at(argument_index(i))
   klassOop context_type();

+  bool is_klass_type() { return Dependencies::is_klass_type(type()); }
+
   methodOop method_argument(int i) {
     oop x = argument(i);
     assert(x->is_method(), "type");

@@ -451,7 +451,6 @@ void nmethod::init_defaults() {
   _stack_traversal_mark    = 0;
   _unload_reported         = false;  // jvmti state

-  NOT_PRODUCT(_has_debug_info = false);
 #ifdef ASSERT
   _oops_are_stale          = false;
 #endif

@@ -191,8 +191,6 @@ class nmethod : public CodeBlob {

   jbyte _scavenge_root_state;

-  NOT_PRODUCT(bool _has_debug_info; )
-
   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload

@@ -329,11 +327,6 @@ class nmethod : public CodeBlob {
   methodOop method() const                        { return _method; }
   AbstractCompiler* compiler() const              { return _compiler; }

-#ifndef PRODUCT
-  bool has_debug_info() const                     { return _has_debug_info; }
-  void set_has_debug_info(bool f)                 { _has_debug_info = false; }
-#endif // NOT PRODUCT
-
   // type info
   bool is_nmethod() const                         { return true; }
   bool is_java_method() const                     { return !method()->is_native(); }

@@ -30,11 +30,10 @@
 #include "memory/resourceArea.hpp"

 PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
-  assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
   _pc_offset           = pc_offset;
   _scope_decode_offset = scope_decode_offset;
   _obj_decode_offset   = obj_decode_offset;
-  _flags.word          = 0;
+  _flags               = 0;
 }

 address PcDesc::real_pc(const nmethod* code) const {

@@ -44,7 +43,7 @@ address PcDesc::real_pc(const nmethod* code) const {
 void PcDesc::print(nmethod* code) {
 #ifndef PRODUCT
   ResourceMark rm;
-  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits);
+  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags);

   if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
     return;

@@ -39,15 +39,17 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
   int _scope_decode_offset; // offset for scope in nmethod
   int _obj_decode_offset;

-  union PcDescFlags {
-    int word;
-    struct {
-      unsigned int reexecute: 1;
-      unsigned int is_method_handle_invoke: 1;
-      unsigned int return_oop: 1;
-    } bits;
-    bool operator ==(const PcDescFlags& other) { return word == other.word; }
-  } _flags;
+  enum {
+    PCDESC_reexecute               = 1 << 0,
+    PCDESC_is_method_handle_invoke = 1 << 1,
+    PCDESC_return_oop              = 1 << 2
+  };
+
+  int _flags;
+
+  void set_flag(int mask, bool z) {
+    _flags = z ? (_flags | mask) : (_flags & ~mask);
+  }

  public:
   int pc_offset() const { return _pc_offset; }

@@ -69,8 +71,8 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
   };

   // Flags
-  bool     should_reexecute()              const { return _flags.bits.reexecute; }
-  void set_should_reexecute(bool z)              { _flags.bits.reexecute = z; }
+  bool     should_reexecute()              const { return (_flags & PCDESC_reexecute) != 0; }
+  void set_should_reexecute(bool z)              { set_flag(PCDESC_reexecute, z); }

   // Does pd refer to the same information as pd?
   bool is_same_info(const PcDesc* pd) {

@@ -79,11 +81,11 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
            _flags == pd->_flags;
   }

-  bool     is_method_handle_invoke()       const { return _flags.bits.is_method_handle_invoke; }
-  void set_is_method_handle_invoke(bool z)       { _flags.bits.is_method_handle_invoke = z; }
+  bool     is_method_handle_invoke()       const { return (_flags & PCDESC_is_method_handle_invoke) != 0; }
+  void set_is_method_handle_invoke(bool z)       { set_flag(PCDESC_is_method_handle_invoke, z); }

-  bool     return_oop()                    const { return _flags.bits.return_oop; }
-  void set_return_oop(bool z)                    { _flags.bits.return_oop = z; }
+  bool     return_oop()                    const { return (_flags & PCDESC_return_oop) != 0; }
+  void set_return_oop(bool z)                    { set_flag(PCDESC_return_oop, z); }

   // Returns the real pc
   address real_pc(const nmethod* code) const;
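The PcDesc change swaps a union of bitfields for a plain int plus explicit masks. Bitfield layout is implementation-defined, while an int with shifts is portable and makes is_same_info's _flags == pd->_flags comparison trivial. A minimal sketch of the same pattern outside HotSpot:

    #include <cassert>

    class PcFlags {
      enum {
        REEXECUTE  = 1 << 0,
        MH_INVOKE  = 1 << 1,
        RETURN_OOP = 1 << 2
      };
      int _flags;

      void set_flag(int mask, bool z) {
        _flags = z ? (_flags | mask) : (_flags & ~mask);
      }

     public:
      PcFlags() : _flags(0) {}
      bool should_reexecute() const     { return (_flags & REEXECUTE) != 0; }
      void set_should_reexecute(bool z) { set_flag(REEXECUTE, z); }
      // A plain int makes equality trivial and the layout portable.
      bool operator==(const PcFlags& other) const { return _flags == other._flags; }
    };

    int main() {
      PcFlags a, b;
      a.set_should_reexecute(true);
      assert(a.should_reexecute() && !(a == b));
      a.set_should_reexecute(false);
      assert(a == b);
    }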
@@ -157,8 +157,14 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
     // ..and clear it.
     Copy::zero_to_words(obj, new_tlab_size);
   } else {
-    // ...and clear just the allocated object.
-    Copy::zero_to_words(obj, size);
+    // ...and zap just allocated object.
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = oopDesc::header_size();
+    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
   }
   thread->tlab().fill(obj, obj + size, new_tlab_size);
   return obj;
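In debug builds the slow-path TLAB is now mangled with a poison pattern rather than zeroed, and the words where the object header will land are skipped, so a concurrent GC thread never treats the mangled region as a parsable object. A standalone sketch of "fill, but skip the header" (word and header sizes are assumed here, not HotSpot's):

    #include <cstddef>
    #include <cstdio>

    typedef unsigned long HeapWord;
    const HeapWord kBadHeapWord = 0xBAADBABEUL;

    // Fill [start+hdr, start+count) with a poison pattern, leaving the
    // header words alone so the region never looks like a valid object.
    static void zap_skipping_header(HeapWord* start, size_t count, size_t hdr) {
      for (size_t i = hdr; i < count; i++) start[i] = kBadHeapWord;
    }

    int main() {
      HeapWord tlab[8] = {0};
      zap_skipping_header(tlab, 8, /*hdr=*/2);  // 2-word header assumed here
      for (int i = 0; i < 8; i++) printf("%d: %lx\n", i, tlab[i]);
    }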
@@ -287,7 +287,10 @@ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_no_klass_install(klass, obj, size);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+#ifndef PRODUCT
+  const size_t hs = oopDesc::header_size();
+  Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
+#endif
   return (oop)obj;
 }

@@ -419,6 +419,8 @@ class Bytecodes: AllStatic {

   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static bool        is_invoke      (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
+
   static int         compute_flags  (const char* format, int more_flags = 0);  // compute the flags
   static int         flags          (int code, bool is_wide) {
     assert(code == (u_char)code, "must be a byte");
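is_invoke works because the five invoke bytecodes occupy contiguous opcode values, turning set membership into two comparisons. A sketch using the JVM specification's opcode numbers:

    #include <cstdio>

    // JVM opcode values: the invoke family is contiguous, 182..186.
    enum Code {
      _invokevirtual   = 182,
      _invokespecial   = 183,
      _invokestatic    = 184,
      _invokeinterface = 185,
      _invokedynamic   = 186
    };

    static bool is_invoke(int code) {
      // Contiguity turns set membership into a range check.
      return _invokevirtual <= code && code <= _invokedynamic;
    }

    int main() {
      printf("%d %d\n", is_invoke(_invokestatic), is_invoke(181));  // 1 0
    }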
@@ -555,7 +555,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
   assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");

   {
-    // Walk all nmethods depending on CallSite
+    // Walk all nmethods depending on this call site.
     MutexLocker mu(Compile_lock, thread);
     Universe::flush_dependents_on(call_site, method_handle);
   }

@@ -1203,12 +1203,12 @@ void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
   // Compute the dependent nmethods that have a reference to a
   // CallSite object.  We use instanceKlass::mark_dependent_nmethod
   // directly instead of CodeCache::mark_for_deoptimization because we
-  // want dependents on the class CallSite only not all classes in the
-  // ContextStream.
+  // want dependents on the call site class only not all classes in
+  // the ContextStream.
   int marked = 0;
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    instanceKlass* call_site_klass = instanceKlass::cast(SystemDictionary::CallSite_klass());
+    instanceKlass* call_site_klass = instanceKlass::cast(call_site->klass());
     marked = call_site_klass->mark_dependent_nmethods(changes);
   }
   if (marked > 0) {

@@ -172,11 +172,6 @@ void constMethodKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
 int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert(obj->is_constMethod(), "should be constMethod");
   constMethodOop cm_oop = constMethodOop(obj);
-#if 0
-  PSParallelCompact::adjust_pointer(cm_oop->adr_method());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_exception_table());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_stackmap_data());
-#endif
   oop* const beg_oop = cm_oop->oop_block_beg();
   oop* const end_oop = cm_oop->oop_block_end();
   for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {

@@ -63,8 +63,10 @@ constantPoolCacheOop constantPoolCacheKlass::allocate(int length,
   // CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);

   oop obj = CollectedHeap::permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
-                                                              size));
+#ifndef PRODUCT
+  const size_t hs = oopDesc::header_size();
+  Universe::heap()->check_for_bad_heap_word_value(((HeapWord*) obj)+hs, size-hs);
+#endif
   constantPoolCacheOop cache = (constantPoolCacheOop) obj;
   assert(!UseConcMarkSweepGC || obj->klass_or_null() == NULL,
          "klass should be NULL here when using CMS");

@@ -600,6 +600,11 @@ public:
   uint taken() {
     return uint_at(taken_off_set);
   }
+
+  void set_taken(uint cnt) {
+    set_uint_at(taken_off_set, cnt);
+  }
+
   // Saturating counter
   uint inc_taken() {
     uint cnt = taken() + 1;

@@ -926,6 +931,10 @@ public:
     return uint_at(not_taken_off_set);
   }

+  void set_not_taken(uint cnt) {
+    set_uint_at(not_taken_off_set, cnt);
+  }
+
   uint inc_not_taken() {
     uint cnt = not_taken() + 1;
     // Did we wrap? Will compiler screw us??
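inc_taken and inc_not_taken are saturating counters: the "Did we wrap?" check pins the count at its maximum instead of wrapping to zero, so a hot branch can never appear colder than it is. A sketch of the idiom:

    #include <cstdio>
    #include <limits>

    // Saturating increment: on wrap-around, stay at the maximum so a hot
    // branch's count can never appear smaller than a cold one's.
    static unsigned inc_saturating(unsigned cnt) {
      unsigned next = cnt + 1;
      if (next == 0) return cnt;  // wrapped: pin at UINT_MAX
      return next;
    }

    int main() {
      unsigned max = std::numeric_limits<unsigned>::max();
      printf("%u\n", inc_saturating(41));          // 42
      printf("%d\n", inc_saturating(max) == max);  // 1: stays saturated
    }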
@@ -914,6 +914,7 @@ methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
                                                Symbol* name,
                                                Symbol* signature,
                                                Handle method_type, TRAPS) {
+  ResourceMark rm;
   methodHandle empty;

   assert(holder() == SystemDictionary::MethodHandle_klass(),

@@ -45,7 +45,7 @@ InlineTree::InlineTree(Compile* c,
   _method(callee),
   _site_invoke_ratio(site_invoke_ratio),
   _max_inline_level(max_inline_level),
-  _count_inline_bcs(method()->code_size())
+  _count_inline_bcs(method()->code_size_for_inlining())
 {
   NOT_PRODUCT(_count_inlines = 0;)
   if (_caller_jvms != NULL) {
@@ -107,7 +107,7 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_

   // positive filter: should send be inlined?  returns NULL (--> yes)
   // or rejection msg
-  int size = callee_method->code_size();
+  int size = callee_method->code_size_for_inlining();

   // Check for too many throws (and not too huge)
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
@@ -141,8 +141,22 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
       assert(mha_profile, "must exist");
       CounterData* cd = mha_profile->as_CounterData();
       invoke_count = cd->count();
+      if (invoke_count == 0) {
+        return "method handle not reached";
+      }
+
+      if (_caller_jvms != NULL && _caller_jvms->method() != NULL &&
+          _caller_jvms->method()->method_data() != NULL &&
+          !_caller_jvms->method()->method_data()->is_empty()) {
+        ciMethodData* mdo = _caller_jvms->method()->method_data();
+        ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci());
+        assert(mha_profile, "must exist");
+        CounterData* cd = mha_profile->as_CounterData();
+        call_site_count = cd->count();
+      } else {
+        call_site_count = invoke_count;  // use the same value
+      }
     }

     assert(invoke_count != 0, "require invocation count greater than zero");
     int freq = call_site_count / invoke_count;
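The new profile logic compares how often this particular call site reaches the method-handle invoke (call_site_count, read from the caller's profile at the call bci) against the total invocation count, and their ratio gates inlining. A toy sketch of that frequency gate, with an invented threshold purely for illustration:

    #include <cstdio>

    // Returns NULL to mean "inline"; otherwise a rejection message --
    // the same convention InlineTree::should_inline uses above.
    static const char* should_inline(int call_site_count, int invoke_count) {
      if (invoke_count == 0) return "method handle not reached";
      int freq = call_site_count / invoke_count;
      if (freq < 1) return "call site too cold";  // invented threshold
      return 0;                                   // NULL => yes, inline
    }

    int main() {
      printf("%s\n", should_inline(0, 0));     // not reached
      printf("%s\n", should_inline(10, 100));  // too cold
      printf("%s\n", should_inline(100, 100) ? "rejected" : "inline");
    }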
@@ -244,7 +258,7 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
   }

   // use frequency-based objections only for non-trivial methods
-  if (callee_method->code_size() <= MaxTrivialSize) return NULL;
+  if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL;

   // don't use counts with -Xcomp or CTW
   if (UseInterpreter && !CompileTheWorld) {

@@ -305,7 +319,7 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
   }

   // suppress a few checks for accessors and trivial methods
-  if (callee_method->code_size() > MaxTrivialSize) {
+  if (callee_method->code_size_for_inlining() > MaxTrivialSize) {

     // don't inline into giant methods
     if (C->unique() > (uint)NodeCountInliningCutoff) {
@@ -349,7 +363,7 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
     }
   }

-  int size = callee_method->code_size();
+  int size = callee_method->code_size_for_inlining();

   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
@@ -394,6 +408,16 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
   return true;
 }

+//------------------------------check_can_parse--------------------------------
+const char* InlineTree::check_can_parse(ciMethod* callee) {
+  // Certain methods cannot be parsed at all:
+  if ( callee->is_native())                     return "native method";
+  if (!callee->can_be_compiled())               return "not compilable (disabled)";
+  if (!callee->has_balanced_monitors())         return "not compilable (unbalanced monitors)";
+  if ( callee->get_flow_analysis()->failing())  return "not compilable (flow analysis failed)";
+  return NULL;
+}
+
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
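check_can_parse centralizes what used to be ParseGenerator::can_parse and upgrades its boolean result to a reason string, NULL meaning "parseable", so every caller can report why inlining was refused. A sketch of the NULL-means-success idiom:

    #include <cstdio>

    struct Method {
      bool is_native;
      bool balanced_monitors;
    };

    // NULL means "yes, parseable"; otherwise the string explains the refusal.
    static const char* check_can_parse(const Method* m) {
      if (m->is_native)          return "native method";
      if (!m->balanced_monitors) return "not compilable (unbalanced monitors)";
      return 0;
    }

    int main() {
      Method ok = { false, true }, bad = { true, true };
      const char* msg = check_can_parse(&bad);
      if (msg != 0) printf("rejected: %s\n", msg);  // rejected: native method
      printf("%s\n", check_can_parse(&ok) == 0 ? "inline" : "skip");
    }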
@@ -423,14 +447,22 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
   int caller_bci = jvms->bci();
   ciMethod *caller_method = jvms->method();

-  if( !pass_initial_checks(caller_method, caller_bci, callee_method)) {
-    if( PrintInlining ) {
+  // Do some initial checks.
+  if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
+    if (PrintInlining) {
       failure_msg = "failed_initial_checks";
-      print_inlining( callee_method, caller_bci, failure_msg);
+      print_inlining(callee_method, caller_bci, failure_msg);
     }
     return NULL;
   }

+  // Do some parse checks.
+  failure_msg = check_can_parse(callee_method);
+  if (failure_msg != NULL) {
+    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+    return NULL;
+  }
+
+  // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
   failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
@@ -471,7 +503,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
   if (failure_msg == NULL)  failure_msg = "inline (hot)";

   // Inline!
-  if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+  if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
   if (UseOldInlining)
     build_inline_tree_for_callee(callee_method, jvms, caller_bci);
   if (InlineWarmCalls && !wci.is_hot())

@@ -481,7 +513,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,

   // Do not inline
   if (failure_msg == NULL)  failure_msg = "too cold to inline";
-  if( PrintInlining ) print_inlining( callee_method, caller_bci, failure_msg);
+  if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
   return NULL;
 }

@@ -61,12 +61,9 @@ public:
   {
     _is_osr        = is_osr;
     _expected_uses = expected_uses;
-    assert(can_parse(method, is_osr), "parse must be possible");
+    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
   }

-  // Can we build either an OSR or a regular parser for this method?
-  static bool can_parse(ciMethod* method, int is_osr = false);
-
   virtual bool      is_parse() const           { return true; }
   virtual JVMState* generate(JVMState* jvms);
   int is_osr() { return _is_osr; }
@@ -152,7 +149,6 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
     call->set_optimized_virtual(true);
     if (method()->is_method_handle_invoke()) {
       call->set_method_handle_invoke(true);
-      kit.C->set_has_method_handle_invokes(true);
     }
   }
   kit.set_arguments_for_java_call(call);

@@ -210,7 +206,6 @@ JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
   call->set_optimized_virtual(true);
   // Take extra care (in the presence of argument motion) not to trash the SP:
   call->set_method_handle_invoke(true);
-  kit.C->set_has_method_handle_invokes(true);

   // Pass the target MethodHandle as first argument and shift the
   // other arguments.
@@ -303,20 +298,8 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
   return kit.transfer_exceptions_into_jvms();
 }

-bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
-  // Certain methods cannot be parsed at all:
-  if (!m->can_be_compiled())              return false;
-  if (!m->has_balanced_monitors())        return false;
-  if (m->get_flow_analysis()->failing())  return false;
-
-  // (Methods may bail out for other reasons, after the parser is run.
-  // We try to avoid this, but if forced, we must return (Node*)NULL.
-  // The user of the CallGenerator must check for this condition.)
-  return true;
-}
-
 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
-  if (!ParseGenerator::can_parse(m))  return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
   return new ParseGenerator(m, expected_uses);
 }
@@ -324,7 +307,7 @@ CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 // for the method execution already in progress, not just the JVMS
 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
-  if (!ParseGenerator::can_parse(m, true))  return NULL;
+  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
   float past_uses = m->interpreter_invocation_count();
   float expected_uses = past_uses;
   return new ParseGenerator(m, expected_uses, true);

@@ -336,7 +319,7 @@ CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj
 }

 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
-  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
+  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
   return new DynamicCallGenerator(m);
 }
@@ -715,24 +698,36 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
     // Get an adapter for the MethodHandle.
     ciMethod* target_method = method_handle->get_method_handle_adapter();
     if (target_method != NULL) {
-      CallGenerator* hit_cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, 1);
-      if (hit_cg != NULL && hit_cg->is_inline())
-        return hit_cg;
+      CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+      if (cg != NULL && cg->is_inline())
+        return cg;
     }
   } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
              method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
+    float prob = PROB_FAIR;
+    Node* meth_region = method_handle->in(0);
+    if (meth_region->is_Region() &&
+        meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
+        meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
+        meth_region->in(1)->in(0)->is_If()) {
+      // If diamond, so grab the probability of the test to drive the inlining below
+      prob = meth_region->in(1)->in(0)->as_If()->_prob;
+      if (meth_region->in(1)->is_IfTrue()) {
+        prob = 1 - prob;
+      }
+    }
+
     // selectAlternative idiom merging two constant MethodHandles.
     // Generate a guard so that each can be inlined.  We might want to
     // do more inputs at later point but this gets the most common
     // case.
-    const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
-    ciObject* const_oop = oop_ptr->const_oop();
-    ciMethodHandle* mh = const_oop->as_method_handle();
-
-    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
-    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
+    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
+    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
     if (cg1 != NULL && cg2 != NULL) {
-      return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
+      const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
+      ciObject* const_oop = oop_ptr->const_oop();
+      ciMethodHandle* mh = const_oop->as_method_handle();
+      return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
     }
   }
   return NULL;
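For the selectAlternative idiom, the code now recovers the real branch probability from the guarding If node and splits the caller's profile between the two constant method-handle targets instead of assuming PROB_FAIR. A toy sketch of rescaling a profile across a guard (numbers invented):

    #include <cstdio>

    struct Profile { double count; };

    // Split one call profile across a two-way guard: each side keeps a
    // share proportional to the branch probability, as with
    // profile.rescale(prob) / profile.rescale(1.0 - prob) above.
    static Profile rescale(Profile p, double fraction) {
      Profile r = { p.count * fraction };
      return r;
    }

    int main() {
      Profile site = { 1000.0 };
      double prob = 0.75;                        // taken side of the diamond
      Profile hot  = rescale(site, prob);        // 750 calls
      Profile cold = rescale(site, 1.0 - prob);  // 250 calls
      printf("hot=%.0f cold=%.0f\n", hot.count, cold.count);
    }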
@@ -741,7 +736,6 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS

 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
                                                        ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
-  assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
   ciMethodHandle* method_handle = call_site->get_target();

   // Set the callee to have access to the class and signature in the

@@ -754,13 +748,13 @@ CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JV
   ciMethod* target_method = method_handle->get_invokedynamic_adapter();
   if (target_method != NULL) {
     Compile *C = Compile::current();
-    CallGenerator* hit_cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
-    if (hit_cg != NULL && hit_cg->is_inline()) {
+    CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+    if (cg != NULL && cg->is_inline()) {
       // Add a dependence for invalidation of the optimization.
-      if (call_site->is_mutable_call_site()) {
-        C->dependencies()->assert_call_site_target_value(C->env()->CallSite_klass(), call_site, method_handle);
+      if (!call_site->is_constant_call_site()) {
+        C->dependencies()->assert_call_site_target_value(call_site, method_handle);
       }
-      return hit_cg;
+      return cg;
     }
   }
   return NULL;

@@ -817,7 +817,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                       &_handler_table, &_inc_table,
                       compiler,
                       env()->comp_level(),
-                      true, /*has_debug_info*/
                       has_unsafe_access()
                       );
 }

@@ -496,14 +496,6 @@ public:
   virtual bool depends_only_on_test() const { return false; }
 };

-//------------------------------MemMoveNode------------------------------------
-// Memory to memory move.  Inserted very late, after allocation.
-class MemMoveNode : public Node {
-public:
-  MemMoveNode( Node *dst, Node *src ) : Node(0,dst,src) {}
-  virtual int Opcode() const;
-};
-
 //------------------------------ThreadLocalNode--------------------------------
 // Ideal Node which returns the base of ThreadLocalStorage.
 class ThreadLocalNode : public Node {

@@ -136,16 +136,10 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
       str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
       ciCallSite* call_site = str.get_call_site();

-      // Inline constant and mutable call sites.  We don't inline
-      // volatile call sites optimistically since they are specified
-      // to change their value often and that would result in a lot of
-      // deoptimizations and recompiles.
-      if (call_site->is_constant_call_site() || call_site->is_mutable_call_site()) {
-        CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
-        if (cg != NULL) {
-          return cg;
-        }
-      }
+      CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
+      if (cg != NULL) {
+        return cg;
+      }
       // If something failed, generate a normal dynamic call.
       return CallGenerator::for_dynamic_call(call_method);
     }
Some files were not shown because too many files have changed in this diff.