Igor Veresov 2011-09-07 11:52:00 -07:00
commit 315ec64dce
129 changed files with 2111 additions and 2336 deletions

View file

@@ -1740,7 +1740,7 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
          else if (f.isCompiledFrame()) { tty.print("compiled"); }
          else if (f.isEntryFrame())    { tty.print("entry"); }
          else if (f.isNativeFrame())   { tty.print("native"); }
-         else if (f.isGlueFrame())     { tty.print("glue"); }
+         else if (f.isRuntimeFrame())  { tty.print("runtime"); }
          else { tty.print("external"); }
          tty.print(" frame with PC = " + f.getPC() + ", SP = " + f.getSP() + ", FP = " + f.getFP());
          if (f.isSignalHandlerFrameDbg()) {

View file

@@ -102,6 +102,11 @@ public class CodeBlob extends VMObject {
   /** On-Stack Replacement method */
   public boolean isOSRMethod()          { return false; }

+  public NMethod asNMethodOrNull() {
+    if (isNMethod()) return (NMethod)this;
+    return null;
+  }
+
   // Boundaries
   public Address headerBegin() {
     return addr;
@@ -195,7 +200,7 @@ public class CodeBlob extends VMObject {
   }

   // Returns true, if the next frame is responsible for GC'ing oops passed as arguments
-  public boolean callerMustGCArguments(JavaThread thread) { return false; }
+  public boolean callerMustGCArguments() { return false; }

   public String getName() {
     return CStringUtilities.getString(nameField.getValue(addr));
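The null-returning downcast added above saves each call site an isNMethod()/cast pair. A minimal, self-contained sketch of the same pattern, with illustrative names rather than the SA's real classes:

class Blob {
    boolean isNMethod() { return false; }
    NMethodBlob asNMethodOrNull() { return isNMethod() ? (NMethodBlob) this : null; }
}

class NMethodBlob extends Blob {
    @Override boolean isNMethod() { return true; }
    void disassemble() { System.out.println("nmethod code"); }
}

public class AsNMethodDemo {
    public static void main(String[] args) {
        Blob[] blobs = { new Blob(), new NMethodBlob() };
        for (Blob b : blobs) {
            NMethodBlob nm = b.asNMethodOrNull();
            if (nm != null) nm.disassemble();   // runs only for the real nmethod
        }
    }
}

Callers test the result for null once instead of consulting a type predicate and then casting.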

View file

@@ -59,6 +59,7 @@ public class CodeCache {
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
     virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
     virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
+    virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
@@ -126,6 +127,10 @@ public class CodeCache {
       Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)),
                   "found wrong CodeBlob");
     }
+    if (result.isRicochetBlob()) {
+      // This should probably be done for other SingletonBlobs
+      return VM.getVM().ricochetBlob();
+    }
     return result;
   }
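Returning the shared VM.getVM().ricochetBlob() instance canonicalizes the lookup, which matters because Frame.isRicochetFrame() (later in this commit) compares code blobs by identity (cb == rcb). A hedged sketch of why handing out one wrapper per address makes identity comparison safe; the cache and names here are hypothetical, not the SA's:

import java.util.HashMap;
import java.util.Map;

public class CanonicalLookupDemo {
    private static final Map<Long, Object> CACHE = new HashMap<>();

    // Always hand out the same wrapper for a given address, so callers
    // may compare results with ==, as Frame.isRicochetFrame() does.
    static Object findBlob(long addr) {
        return CACHE.computeIfAbsent(addr, a -> new Object());
    }

    public static void main(String[] args) {
        System.out.println(findBlob(0x1000) == findBlob(0x1000));   // true
    }
}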

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,31 +22,37 @@
  *
  */

-package sun.jvm.hotspot.runtime.amd64;
+package sun.jvm.hotspot.code;

-import sun.jvm.hotspot.asm.amd64.*;
+import java.util.*;

 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;

-public class AMD64RegisterMap extends RegisterMap {
-
-  /** This is the only public constructor */
-  public AMD64RegisterMap(JavaThread thread, boolean updateMap) {
-    super(thread, updateMap);
-  }
-
-  protected AMD64RegisterMap(RegisterMap map) {
-    super(map);
-  }
-
-  public Object clone() {
-    AMD64RegisterMap retval = new AMD64RegisterMap(this);
-    return retval;
-  }
-
-  // no PD state to clear or copy:
-  protected void clearPD() {}
-  protected void initializePD() {}
-  protected void initializeFromPD(RegisterMap map) {}
-  protected Address getLocationPD(VMReg reg) { return null; }
+public class MethodHandlesAdapterBlob extends AdapterBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("MethodHandlesAdapterBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public MethodHandlesAdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isMethodHandlesAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "MethodHandlesAdapterBlob: " + super.getName();
+  }
 }

View file

@@ -46,6 +46,7 @@ public class NMethod extends CodeBlob {
   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
   private static CIntegerField deoptOffsetField;
+  private static CIntegerField deoptMhOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
   private static CIntegerField oopsOffsetField;
@@ -95,6 +96,7 @@ public class NMethod extends CodeBlob {
     exceptionOffsetField      = type.getCIntegerField("_exception_offset");
     deoptOffsetField          = type.getCIntegerField("_deoptimize_offset");
+    deoptMhOffsetField        = type.getCIntegerField("_deoptimize_mh_offset");
     origPCOffsetField         = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField           = type.getCIntegerField("_stub_offset");
     oopsOffsetField           = type.getCIntegerField("_oops_offset");
@@ -139,7 +141,8 @@ public class NMethod extends CodeBlob {
   public Address instsBegin()          { return codeBegin(); }
   public Address instsEnd()            { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address exceptionBegin()      { return headerBegin().addOffsetTo(getExceptionOffset()); }
-  public Address deoptBegin()          { return headerBegin().addOffsetTo(getDeoptOffset()); }
+  public Address deoptHandlerBegin()   { return headerBegin().addOffsetTo(getDeoptOffset()); }
+  public Address deoptMhHandlerBegin() { return headerBegin().addOffsetTo(getDeoptMhOffset()); }
   public Address stubBegin()           { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address stubEnd()             { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsBegin()           { return headerBegin().addOffsetTo(getOopsOffset()); }
@@ -250,6 +253,22 @@ public class NMethod extends CodeBlob {
     return (int) scavengeRootStateField.getValue(addr);
   }
+  // MethodHandle
+  public boolean isMethodHandleReturn(Address returnPc) {
+    // Bit fields are hard to read from Java, and the cached flag is only
+    // there for performance, so just go directly to the PCDesc:
+    // if (!hasMethodHandleInvokes()) return false;
+    PCDesc pd = getPCDescAt(returnPc);
+    if (pd == null)
+      return false;
+    return pd.isMethodHandleInvoke();
+  }
+
+  // Deopt
+  // Returns true if the PC is one we would expect if the frame is being deopted.
+  public boolean isDeoptPc      (Address pc) { return isDeoptEntry(pc) || isDeoptMhEntry(pc); }
+  public boolean isDeoptEntry   (Address pc) { return pc == deoptHandlerBegin(); }
+  public boolean isDeoptMhEntry (Address pc) { return pc == deoptMhHandlerBegin(); }
   /** Tells whether frames described by this nmethod can be
       deoptimized. Note: native wrappers cannot be deoptimized. */
@@ -388,6 +407,7 @@ public class NMethod extends CodeBlob {
   private int getEntryBCI()         { return (int) entryBCIField        .getValue(addr); }
   private int getExceptionOffset()  { return (int) exceptionOffsetField .getValue(addr); }
   private int getDeoptOffset()      { return (int) deoptOffsetField     .getValue(addr); }
+  private int getDeoptMhOffset()    { return (int) deoptMhOffsetField   .getValue(addr); }
   private int getStubOffset()       { return (int) stubOffsetField      .getValue(addr); }
   private int getOopsOffset()       { return (int) oopsOffsetField      .getValue(addr); }
   private int getScopesDataOffset() { return (int) scopesDataOffsetField.getValue(addr); }
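A hedged sketch of how a stack walker can use predicates like isDeoptPc(): if a saved return PC points at an nmethod's deopt handler, the frame has been patched for deoptimization and the original PC has to be recovered from the frame itself (HotSpot keeps it at _orig_pc_offset). The addresses below are stand-ins, not real handler addresses:

public class DeoptPcDemo {
    static final long DEOPT_HANDLER    = 0x7f00_0000_1000L;   // hypothetical values
    static final long DEOPT_MH_HANDLER = 0x7f00_0000_1040L;

    // Mirrors the shape of NMethod.isDeoptPc(): either handler entry counts.
    static boolean isDeoptPc(long pc) {
        return pc == DEOPT_HANDLER || pc == DEOPT_MH_HANDLER;
    }

    public static void main(String[] args) {
        long savedReturnPc = DEOPT_MH_HANDLER;
        System.out.println(isDeoptPc(savedReturnPc)
                ? "frame was deoptimized; use the saved original PC"
                : "frame is live at 0x" + Long.toHexString(savedReturnPc));
    }
}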

View file

@@ -38,6 +38,9 @@ public class PCDesc extends VMObject {
   private static CIntegerField scopeDecodeOffsetField;
   private static CIntegerField objDecodeOffsetField;
   private static CIntegerField pcFlagsField;
+  private static int reexecuteMask;
+  private static int isMethodHandleInvokeMask;
+  private static int returnOopMask;

   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,10 @@ public class PCDesc extends VMObject {
     scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
     objDecodeOffsetField   = type.getCIntegerField("_obj_decode_offset");
     pcFlagsField           = type.getCIntegerField("_flags");
+
+    reexecuteMask             = db.lookupIntConstant("PcDesc::PCDESC_reexecute");
+    isMethodHandleInvokeMask  = db.lookupIntConstant("PcDesc::PCDESC_is_method_handle_invoke");
+    returnOopMask             = db.lookupIntConstant("PcDesc::PCDESC_return_oop");
   }

   public PCDesc(Address addr) {
@@ -81,7 +88,12 @@ public class PCDesc extends VMObject {
   public boolean getReexecute() {
     int flags = (int)pcFlagsField.getValue(addr);
-    return ((flags & 0x1) == 1); // first is the reexecute bit
+    return (flags & reexecuteMask) != 0;
+  }
+
+  public boolean isMethodHandleInvoke() {
+    int flags = (int)pcFlagsField.getValue(addr);
+    return (flags & isMethodHandleInvokeMask) != 0;
   }

   public void print(NMethod code) {
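The rewrite replaces the hardcoded 0x1 test with masks looked up from the target VM's type database (db.lookupIntConstant("PcDesc::PCDESC_reexecute") and friends), so the SA keeps working if the VM ever renumbers the bits. A self-contained illustration of the decoding, with stand-in mask values rather than the ones read from the VM:

public class PcFlagsDemo {
    // Stand-ins for the values the SA reads from the type database.
    static final int REEXECUTE               = 1 << 0;
    static final int IS_METHOD_HANDLE_INVOKE = 1 << 1;
    static final int RETURN_OOP              = 1 << 2;

    static boolean isSet(int flags, int mask) { return (flags & mask) != 0; }

    public static void main(String[] args) {
        int flags = IS_METHOD_HANDLE_INVOKE | RETURN_OOP;
        System.out.println("reexecute: " + isSet(flags, REEXECUTE));               // false
        System.out.println("mh invoke: " + isSet(flags, IS_METHOD_HANDLE_INVOKE)); // true
    }
}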

View file

@@ -41,11 +41,15 @@ public class RicochetBlob extends SingletonBlob {
   }

   private static void initialize(TypeDataBase db) {
-    // Type type = db.lookupType("RicochetBlob");
-
-    // FIXME: add any needed fields
+    Type type = db.lookupType("RicochetBlob");
+
+    bounceOffsetField    = type.getCIntegerField("_bounce_offset");
+    exceptionOffsetField = type.getCIntegerField("_exception_offset");
   }

+  private static CIntegerField bounceOffsetField;
+  private static CIntegerField exceptionOffsetField;
+
   public RicochetBlob(Address addr) {
     super(addr);
   }
@@ -53,4 +57,14 @@ public class RicochetBlob extends SingletonBlob {
   public boolean isRicochetBlob() {
     return true;
   }
+
+  public Address bounceAddr() {
+    return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr));
+  }
+
+  public boolean returnsToBounceAddr(Address pc) {
+    Address bouncePc = bounceAddr();
+    return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc));
+  }
 }

View file

@@ -30,6 +30,8 @@ import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;

 public class RuntimeStub extends CodeBlob {
+  private static CIntegerField callerMustGCArgumentsField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -40,6 +42,7 @@ public class RuntimeStub extends CodeBlob {
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("RuntimeStub");
+    callerMustGCArgumentsField = type.getCIntegerField("_caller_must_gc_arguments");

     // FIXME: add any needed fields
   }
@@ -52,6 +55,11 @@ public class RuntimeStub extends CodeBlob {
     return true;
   }

+  public boolean callerMustGCArguments() {
+    return callerMustGCArgumentsField.getValue(addr) != 0;
+  }
+
   public String getName() {
     return "RuntimeStub: " + super.getName();
   }
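HotSpot stores _caller_must_gc_arguments as a C++ bool, so the SA reads it as a small integer and compares against zero rather than assuming true is exactly 1. A sketch of the idiom; the byte array stands in for a window onto target-VM memory:

public class CBoolDemo {
    // Pretend this is memory read out of the debuggee process.
    static final byte[] MEMORY = { 0, 1, (byte) 0xFF };

    // Mirrors callerMustGCArgumentsField.getValue(addr) != 0: any non-zero
    // byte counts as true, which also tolerates non-canonical bool values.
    static boolean cBoolAt(int offset) { return MEMORY[offset] != 0; }

    public static void main(String[] args) {
        for (int i = 0; i < MEMORY.length; i++)
            System.out.println(i + " -> " + cBoolAt(i));
    }
}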

View file

@@ -246,7 +246,7 @@ public class OopMapSet extends VMObject {
   }

   // Check if caller must update oop argument
-  regMap.setIncludeArgumentOops(cb.callerMustGCArguments(regMap.getThread()));
+  regMap.setIncludeArgumentOops(cb.callerMustGCArguments());

   int nofCallee = 0;
   Address[] locs = new Address[2 * REG_COUNT + 1];

View file

@@ -90,7 +90,7 @@ public class BytecodeLoadConstant extends Bytecode {
                     jcode == Bytecodes._ldc2_w;
     if (! codeOk) return false;

-    ConstantTag ctag = method().getConstants().getTagAt(rawIndex());
+    ConstantTag ctag = method().getConstants().getTagAt(poolIndex());
     if (jcode == Bytecodes._ldc2_w) {
       // has to be double or long
       return (ctag.isDouble() || ctag.isLong()) ? true: false;

View file

@@ -28,11 +28,13 @@ import java.io.*;
 import com.sun.jdi.*;

+import sun.jvm.hotspot.memory.SystemDictionary;
 import sun.jvm.hotspot.oops.Instance;
 import sun.jvm.hotspot.oops.InstanceKlass;
 import sun.jvm.hotspot.oops.ArrayKlass;
 import sun.jvm.hotspot.oops.JVMDIClassStatus;
 import sun.jvm.hotspot.oops.Klass;
+import sun.jvm.hotspot.oops.ObjArray;
 import sun.jvm.hotspot.oops.Oop;
 import sun.jvm.hotspot.oops.Symbol;
 import sun.jvm.hotspot.oops.DefaultHeapVisitor;
@@ -53,6 +55,7 @@ implements ReferenceType {
     private SoftReference methodsCache;
     private SoftReference allMethodsCache;
     private SoftReference nestedTypesCache;
+    private SoftReference methodInvokesCache;

     /* to mark when no info available */
     static final SDE NO_SDE_INFO_MARK = new SDE();
@@ -82,6 +85,27 @@ implements ReferenceType {
                 return method;
             }
         }
+        if (ref.getMethodHolder().equals(SystemDictionary.getMethodHandleKlass())) {
+          // invoke methods are generated as needed, so make mirrors as needed
+          List mis = null;
+          if (methodInvokesCache == null) {
+            mis = new ArrayList();
+            methodInvokesCache = new SoftReference(mis);
+          } else {
+            mis = (List)methodInvokesCache.get();
+          }
+          it = mis.iterator();
+          while (it.hasNext()) {
+            MethodImpl method = (MethodImpl)it.next();
+            if (ref.equals(method.ref())) {
+              return method;
+            }
+          }
+          MethodImpl method = MethodImpl.createMethodImpl(vm, this, ref);
+          mis.add(method);
+          return method;
+        }
         throw new IllegalArgumentException("Invalid method id: " + ref);
     }
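methodInvokesCache follows the same SoftReference idiom as the other per-type caches: the GC may clear it under memory pressure, and the mirrors are rebuilt on demand. A self-contained sketch of the idiom (generics are added for clarity; the SA file above predates them):

import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class SoftCacheDemo {
    private SoftReference<List<String>> cache;

    synchronized List<String> mirrors() {
        List<String> list = (cache == null) ? null : cache.get();
        if (list == null) {            // first use, or cleared by the GC
            list = new ArrayList<>();
            cache = new SoftReference<>(list);
        }
        return list;
    }

    public static void main(String[] args) {
        SoftCacheDemo d = new SoftCacheDemo();
        d.mirrors().add("invokeExact mirror");
        System.out.println(d.mirrors());
    }
}

Note that this sketch also re-creates the list when the soft reference has been cleared, whereas the hunk above only does so while the field is still null.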

View file

@@ -123,6 +123,9 @@ public class StackFrameImpl extends MirrorImpl
             Assert.that(values.size() > 0, "this is missing");
         }
         // 'this' at index 0.
+        if (values.get(0).getType() == BasicType.getTConflict()) {
+          return null;
+        }
         OopHandle handle = values.oopHandleAt(0);
         ObjectHeap heap = vm.saObjectHeap();
         thisObject = vm.objectMirror(heap.newOop(handle));
@@ -210,6 +213,8 @@ public class StackFrameImpl extends MirrorImpl
         validateStackFrame();
         StackValueCollection values = saFrame.getLocals();
         MethodImpl mmm = (MethodImpl)location.method();
+        if (mmm.isNative())
+            return null;
         List argSigs = mmm.argumentSignatures();
         int count = argSigs.size();
         List res = new ArrayList(0);
@@ -231,6 +236,38 @@ public class StackFrameImpl extends MirrorImpl
         ValueImpl valueImpl = null;
         OopHandle handle = null;
         ObjectHeap heap = vm.saObjectHeap();
+        if (values.get(ss).getType() == BasicType.getTConflict()) {
+          // Dead locals, so just represent them as a zero of the appropriate type
+          if (variableType == BasicType.T_BOOLEAN) {
+            valueImpl = (BooleanValueImpl) vm.mirrorOf(false);
+          } else if (variableType == BasicType.T_CHAR) {
+            valueImpl = (CharValueImpl) vm.mirrorOf((char)0);
+          } else if (variableType == BasicType.T_FLOAT) {
+            valueImpl = (FloatValueImpl) vm.mirrorOf((float)0);
+          } else if (variableType == BasicType.T_DOUBLE) {
+            valueImpl = (DoubleValueImpl) vm.mirrorOf((double)0);
+          } else if (variableType == BasicType.T_BYTE) {
+            valueImpl = (ByteValueImpl) vm.mirrorOf((byte)0);
+          } else if (variableType == BasicType.T_SHORT) {
+            valueImpl = (ShortValueImpl) vm.mirrorOf((short)0);
+          } else if (variableType == BasicType.T_INT) {
+            valueImpl = (IntegerValueImpl) vm.mirrorOf((int)0);
+          } else if (variableType == BasicType.T_LONG) {
+            valueImpl = (LongValueImpl) vm.mirrorOf((long)0);
+          } else if (variableType == BasicType.T_OBJECT) {
+            // we may have an [Ljava/lang/Object; i.e., an Object[] whose
+            // elements may themselves be arrays, because every array is an Object
+            handle = null;
+            valueImpl = (ObjectReferenceImpl) vm.objectMirror(heap.newOop(handle));
+          } else if (variableType == BasicType.T_ARRAY) {
+            handle = null;
+            valueImpl = vm.arrayMirror((Array)heap.newOop(handle));
+          } else if (variableType == BasicType.T_VOID) {
+            valueImpl = new VoidValueImpl(vm);
+          } else {
+            throw new RuntimeException("Should not read here");
+          }
+        } else {
           if (variableType == BasicType.T_BOOLEAN) {
             valueImpl = (BooleanValueImpl) vm.mirrorOf(values.booleanAt(ss));
           } else if (variableType == BasicType.T_CHAR) {
@@ -260,6 +297,7 @@ public class StackFrameImpl extends MirrorImpl
           } else {
             throw new RuntimeException("Should not read here");
           }
+        }
         return valueImpl;
     }
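The T_CONFLICT branch above maps dead locals (slots the compiler no longer tracks) onto a zero value of the variable's declared type, so JDI clients see false/0/null instead of garbage or a NullPointerException. The mapping, reduced to a self-contained table with an illustrative enum rather than the SA's BasicType:

public class DeadLocalDemo {
    enum Kind { BOOLEAN, CHAR, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, OBJECT, ARRAY }

    static Object zeroOf(Kind k) {
        switch (k) {
            case BOOLEAN: return Boolean.FALSE;
            case CHAR:    return (char) 0;
            case BYTE:    return (byte) 0;
            case SHORT:   return (short) 0;
            case INT:     return 0;
            case LONG:    return 0L;
            case FLOAT:   return 0f;
            case DOUBLE:  return 0d;
            default:      return null;   // object and array slots become null mirrors
        }
    }

    public static void main(String[] args) {
        for (Kind k : Kind.values())
            System.out.println(k + " -> " + zeroOf(k));
    }
}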

View file

@@ -44,6 +44,7 @@ public class SystemDictionary {
   private static sun.jvm.hotspot.types.OopField systemKlassField;
   private static sun.jvm.hotspot.types.OopField threadKlassField;
   private static sun.jvm.hotspot.types.OopField threadGroupKlassField;
+  private static sun.jvm.hotspot.types.OopField methodHandleKlassField;

   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -69,6 +70,7 @@ public class SystemDictionary {
     systemKlassField = type.getOopField(WK_KLASS("System_klass"));
     threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
     threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
+    methodHandleKlassField = type.getOopField(WK_KLASS("MethodHandle_klass"));
   }

   // These WK functions must follow the definitions in systemDictionary.hpp:
@@ -127,6 +129,10 @@ public class SystemDictionary {
     return (InstanceKlass) newOop(systemKlassField.getValue());
   }

+  public static InstanceKlass getMethodHandleKlass() {
+    return (InstanceKlass) newOop(methodHandleKlassField.getValue());
+  }
+
   public InstanceKlass getAbstractOwnableSynchronizerKlass() {
     return (InstanceKlass) find("java/util/concurrent/locks/AbstractOwnableSynchronizer",
                                 null, null);

View file

@@ -93,6 +93,8 @@ public class CompiledVFrame extends JavaVFrame {
   }

   public StackValueCollection getLocals() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getLocals();
     if (scvList == null)
       return new StackValueCollection();
@@ -108,6 +110,8 @@ public class CompiledVFrame extends JavaVFrame {
   }

   public StackValueCollection getExpressions() {
+    if (getScope() == null)
+      return new StackValueCollection();
     List scvList = getScope().getExpressions();
     if (scvList == null)
       return new StackValueCollection();
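Both guards return an empty StackValueCollection rather than null when the frame has no scope (for instance while it is being deoptimized), so callers can iterate without a null check. The defensive shape, in a generic sketch with hypothetical names:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class EmptyOverNullDemo {
    static List<String> locals(List<String> scope) {
        if (scope == null)
            return Collections.emptyList();   // empty, never null
        return scope;
    }

    public static void main(String[] args) {
        // Callers iterate safely whether or not a scope exists.
        for (String s : locals(null)) System.out.println(s);
        for (String s : locals(Arrays.asList("a", "b"))) System.out.println(s);
    }
}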

View file

@@ -33,6 +33,7 @@ import sun.jvm.hotspot.c1.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.sparc.SPARCFrame;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
@@ -74,11 +75,19 @@ public abstract class Frame implements Cloneable {
   /** Size of constMethodOopDesc for computing BCI from BCP (FIXME: hack) */
   private static long constMethodOopDescSize;

+  private static int pcReturnOffset;
+
+  public static int pcReturnOffset() {
+    return pcReturnOffset;
+  }
+
   private static synchronized void initialize(TypeDataBase db) {
     Type constMethodOopType = db.lookupType("constMethodOopDesc");
     // FIXME: not sure whether alignment here is correct or how to
     // force it (round up to address size?)
     constMethodOopDescSize = constMethodOopType.getSize();
+
+    pcReturnOffset = db.lookupIntConstant("frame::pc_return_offset").intValue();
   }

   protected int bcpToBci(Address bcp, ConstMethod cm) {
@@ -106,6 +115,10 @@ public abstract class Frame implements Cloneable {
   public void    setPC(Address newpc) { pc = newpc; }
   public boolean isDeoptimized()      { return deoptimized; }

+  public CodeBlob cb() {
+    return VM.getVM().getCodeCache().findBlob(getPC());
+  }
+
   public abstract Address getSP();
   public abstract Address getID();
   public abstract Address getFP();
@@ -134,6 +147,12 @@ public abstract class Frame implements Cloneable {
     }
   }

+  public boolean isRicochetFrame() {
+    CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC());
+    RicochetBlob rcb = VM.getVM().ricochetBlob();
+    return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC()));
+  }
+
   public boolean isCompiledFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
@@ -142,7 +161,7 @@ public abstract class Frame implements Cloneable {
     return (cb != null && cb.isJavaMethod());
   }

-  public boolean isGlueFrame() {
+  public boolean isRuntimeFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
     }
@@ -197,7 +216,8 @@ public abstract class Frame implements Cloneable {
   public Frame realSender(RegisterMap map) {
     if (!VM.getVM().isCore()) {
       Frame result = sender(map);
-      while (result.isGlueFrame()) {
+      while (result.isRuntimeFrame() ||
+             result.isRicochetFrame()) {
         result = result.sender(map);
       }
       return result;
@@ -611,6 +631,9 @@ public abstract class Frame implements Cloneable {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(cb != null, "sanity check");
     }
+    if (cb == VM.getVM().ricochetBlob()) {
+      oopsRicochetDo(oopVisitor, regMap);
+    }
     if (cb.getOopMaps() != null) {
       OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging());
@@ -627,6 +650,10 @@ public abstract class Frame implements Cloneable {
     //    }
   }

+  private void oopsRicochetDo(AddressVisitor oopVisitor, RegisterMap regMap) {
+    // XXX Empty for now
+  }
+
   // FIXME: implement the above routines, plus add
   // oops_interpreted_arguments_do and oops_compiled_arguments_do
 }
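realSender() now also skips ricochet frames, so a JDI client walking a stack through a method-handle adapter lands on a real Java sender. The skipping loop, reduced to a self-contained sketch with a hypothetical frame type:

public class RealSenderDemo {
    static class F {
        final String name; final boolean skip; final F sender;
        F(String name, boolean skip, F sender) {
            this.name = name; this.skip = skip; this.sender = sender;
        }
    }

    // Walk past frames that are only calling-convention glue, as
    // Frame.realSender() does for runtime and ricochet frames.
    static F realSender(F f) {
        F s = f.sender;
        while (s != null && s.skip) s = s.sender;
        return s;
    }

    public static void main(String[] args) {
        F main = new F("main", false, null);
        F stub = new F("runtime stub", true, main);
        F leaf = new F("leaf", false, stub);
        System.out.println(realSender(leaf).name);   // prints "main"
    }
}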

View file

@@ -128,14 +128,14 @@ public abstract class JavaVFrame extends VFrame {
   }

   // dynamic part - we just compare the frame pointer
-  if (! getFrame().getFP().equals(other.getFrame().getFP())) {
+  if (! getFrame().equals(other.getFrame())) {
     return false;
   }

   return true;
 }

 public int hashCode() {
-  return getMethod().hashCode() ^ getBCI() ^ getFrame().getFP().hashCode();
+  return getMethod().hashCode() ^ getBCI() ^ getFrame().hashCode();
 }

 /** Structural compare */
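The equals/hashCode pair is updated together: equality now delegates to the whole Frame (X86Frame compares SP, unextended SP, FP, and PC), and hashCode follows so that equal vframes still hash alike. A compact reminder of the contract, with a hypothetical Key type:

import java.util.Objects;

public class HashContractDemo {
    static final class Key {
        final long sp, fp, pc;
        Key(long sp, long fp, long pc) { this.sp = sp; this.fp = fp; this.pc = pc; }

        @Override public boolean equals(Object o) {
            if (!(o instanceof Key)) return false;
            Key k = (Key) o;
            return sp == k.sp && fp == k.fp && pc == k.pc;
        }

        // Must use the same fields as equals(), or hash containers break.
        @Override public int hashCode() { return Objects.hash(sp, fp, pc); }
    }

    public static void main(String[] args) {
        Key a = new Key(1, 2, 3), b = new Key(1, 2, 3);
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode());   // true
    }
}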

View file

@@ -100,7 +100,7 @@ public class StackValue {
   public int hashCode() {
     if (type == BasicType.getTObject()) {
-      return handleValue.hashCode();
+      return handleValue != null ? handleValue.hashCode() : 5;
     } else {
       // Returns 0 for conflict type
       return (int) integerValue;

View file

@@ -77,7 +77,7 @@ public class VFrame {
       return new CompiledVFrame(f, regMap, thread, scope, mayBeImprecise);
     }

-    if (f.isGlueFrame()) {
+    if (f.isRuntimeFrame()) {
       // This is a conversion frame. Skip this frame and try again.
       RegisterMap tempMap = regMap.copy();
       Frame s = f.sender(tempMap);

View file

@@ -30,6 +30,7 @@ import java.util.*;
 import java.util.regex.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.c1.*;
+import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;
@@ -85,6 +86,9 @@ public class VM {
   private Interpreter  interpreter;
   private StubRoutines stubRoutines;
   private Bytes        bytes;
+
+  private RicochetBlob ricochetBlob;
+
   /** Flags indicating whether we are attached to a core, C1, or C2 build */
   private boolean      usingClientCompiler;
   private boolean      usingServerCompiler;
@@ -618,6 +622,18 @@ public class VM {
     return stubRoutines;
   }

+  public RicochetBlob ricochetBlob() {
+    if (ricochetBlob == null) {
+      Type ricochetType = db.lookupType("SharedRuntime");
+      AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob");
+      Address addr = ricochetBlobAddress.getValue();
+      if (addr != null) {
+        ricochetBlob = new RicochetBlob(addr);
+      }
+    }
+    return ricochetBlob;
+  }
+
   public VMRegImpl getVMRegImplInfo() {
     if (vmregImpl == null) {
       vmregImpl = new VMRegImpl();
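ricochetBlob() is resolved lazily: the first call looks up SharedRuntime::_ricochet_blob in the type database, and the cached wrapper is reused afterwards, which also keeps findBlob()'s identity check honest. A sketch of the lazy, null-tolerant lookup; the Supplier stands in for the type-database read:

import java.util.function.Supplier;

public class LazyLookupDemo {
    private Object blob;                 // stays null while the VM has none
    private final Supplier<Object> lookup;

    LazyLookupDemo(Supplier<Object> lookup) { this.lookup = lookup; }

    Object blob() {
        if (blob == null) {              // retried until the lookup succeeds
            blob = lookup.get();
        }
        return blob;
    }

    public static void main(String[] args) {
        LazyLookupDemo vm = new LazyLookupDemo(Object::new);
        System.out.println(vm.blob() == vm.blob());   // true: cached instance
    }
}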

View file

@@ -29,6 +29,7 @@ import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.runtime.x86.*;

 /** <P> Should be able to be used on all amd64 platforms we support
     (Linux/amd64) to implement JavaThread's
@@ -123,7 +124,7 @@ public class AMD64CurrentFrameGuess {
          offset += vm.getAddressSize()) {
       try {
         Address curSP = sp.addOffsetTo(offset);
-        Frame frame = new AMD64Frame(curSP, null, pc);
+        Frame frame = new X86Frame(curSP, null, pc);
         RegisterMap map = thread.newRegisterMap(false);
         while (frame != null) {
           if (frame.isEntryFrame() && frame.entryFrameIsFirst()) {

View file

@@ -1,528 +0,0 @@
/*
* Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime.amd64;
import java.util.*;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.compiler.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
/** Specialization of and implementation of abstract methods of the
Frame class for the amd64 CPU. */
public class AMD64Frame extends Frame {
private static final boolean DEBUG;
static {
DEBUG = System.getProperty("sun.jvm.hotspot.runtime.amd64.AMD64Frame.DEBUG") != null;
}
// refer to frame_amd64.hpp
private static final int PC_RETURN_OFFSET = 0;
// All frames
private static final int LINK_OFFSET = 0;
private static final int RETURN_ADDR_OFFSET = 1;
private static final int SENDER_SP_OFFSET = 2;
// Interpreter frames
private static final int INTERPRETER_FRAME_MIRROR_OFFSET = 2; // for native calls only
private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1;
private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1;
private static final int INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1;
private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only
private static int INTERPRETER_FRAME_CACHE_OFFSET;
private static int INTERPRETER_FRAME_LOCALS_OFFSET;
private static int INTERPRETER_FRAME_BCX_OFFSET;
private static int INTERPRETER_FRAME_INITIAL_SP_OFFSET;
private static int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET;
private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
// Entry frames
private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -6;
// Native frames
private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
if (VM.getVM().isCore()) {
INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
} else {
INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
}
INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1;
INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1;
INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
}
// an additional field beyond sp and pc:
Address raw_fp; // frame pointer
private Address raw_unextendedSP;
private AMD64Frame() {
}
private void adjustForDeopt() {
if ( pc != null) {
// Look for a deopt pc and if it is deopted convert to original pc
CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
if (cb != null && cb.isJavaMethod()) {
NMethod nm = (NMethod) cb;
if (pc.equals(nm.deoptBegin())) {
// adjust pc if frame is deoptimized.
if (Assert.ASSERTS_ENABLED) {
Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
}
pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
deoptimized = true;
}
}
}
}
public AMD64Frame(Address raw_sp, Address raw_fp, Address pc) {
this.raw_sp = raw_sp;
this.raw_unextendedSP = raw_sp;
this.raw_fp = raw_fp;
this.pc = pc;
// Frame must be fully constructed before this call
adjustForDeopt();
if (DEBUG) {
System.out.println("AMD64Frame(sp, fp, pc): " + this);
dumpStack();
}
}
public AMD64Frame(Address raw_sp, Address raw_fp) {
this.raw_sp = raw_sp;
this.raw_unextendedSP = raw_sp;
this.raw_fp = raw_fp;
this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
// Frame must be fully constructed before this call
adjustForDeopt();
if (DEBUG) {
System.out.println("AMD64Frame(sp, fp): " + this);
dumpStack();
}
}
// This constructor should really take the unextended SP as an arg
// but then the constructor is ambiguous with constructor that takes
// a PC so take an int and convert it.
public AMD64Frame(Address raw_sp, Address raw_fp, long extension) {
this.raw_sp = raw_sp;
if ( raw_sp == null) {
this.raw_unextendedSP = null;
} else {
this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
}
this.raw_fp = raw_fp;
this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
// Frame must be fully constructed before this call
adjustForDeopt();
if (DEBUG) {
System.out.println("AMD64Frame(sp, fp, extension): " + this);
dumpStack();
}
}
public Object clone() {
AMD64Frame frame = new AMD64Frame();
frame.raw_sp = raw_sp;
frame.raw_unextendedSP = raw_unextendedSP;
frame.raw_fp = raw_fp;
frame.pc = pc;
frame.deoptimized = deoptimized;
return frame;
}
public boolean equals(Object arg) {
if (arg == null) {
return false;
}
if (!(arg instanceof AMD64Frame)) {
return false;
}
AMD64Frame other = (AMD64Frame) arg;
return (AddressOps.equal(getSP(), other.getSP()) &&
AddressOps.equal(getFP(), other.getFP()) &&
AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) &&
AddressOps.equal(getPC(), other.getPC()));
}
public int hashCode() {
if (raw_sp == null) {
return 0;
}
return raw_sp.hashCode();
}
public String toString() {
return "sp: " + (getSP() == null? "null" : getSP().toString()) +
", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) +
", fp: " + (getFP() == null? "null" : getFP().toString()) +
", pc: " + (pc == null? "null" : pc.toString());
}
// accessors for the instance variables
public Address getFP() { return raw_fp; }
public Address getSP() { return raw_sp; }
public Address getID() { return raw_sp; }
// FIXME: not implemented yet (should be done for Solaris/AMD64)
public boolean isSignalHandlerFrameDbg() { return false; }
public int getSignalNumberDbg() { return 0; }
public String getSignalNameDbg() { return null; }
public boolean isInterpretedFrameValid() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isInterpretedFrame(), "Not an interpreted frame");
}
// These are reasonable sanity checks
if (getFP() == null || getFP().andWithMask(0x3) != null) {
return false;
}
if (getSP() == null || getSP().andWithMask(0x3) != null) {
return false;
}
if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) {
return false;
}
// These are hacks to keep us out of trouble.
// The problem with these is that they mask other problems
if (getFP().lessThanOrEqual(getSP())) {
// this attempts to deal with unsigned comparison above
return false;
}
if (getFP().minus(getSP()) > 4096 * VM.getVM().getAddressSize()) {
// stack frames shouldn't be large.
return false;
}
return true;
}
// FIXME: not applicable in current system
// void patch_pc(Thread* thread, address pc);
public Frame sender(RegisterMap regMap, CodeBlob cb) {
AMD64RegisterMap map = (AMD64RegisterMap) regMap;
if (Assert.ASSERTS_ENABLED) {
Assert.that(map != null, "map must be set");
}
// Default is we don't have to follow them. The sender_for_xxx will
// update it accordingly
map.setIncludeArgumentOops(false);
if (isEntryFrame()) return senderForEntryFrame(map);
if (isInterpretedFrame()) return senderForInterpreterFrame(map);
if (!VM.getVM().isCore()) {
if(cb == null) {
cb = VM.getVM().getCodeCache().findBlob(getPC());
} else {
if (Assert.ASSERTS_ENABLED) {
Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same");
}
}
if (cb != null) {
return senderForCompiledFrame(map, cb);
}
}
// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
return new AMD64Frame(getSenderSP(), getLink(), getSenderPC());
}
private Frame senderForEntryFrame(AMD64RegisterMap map) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(map != null, "map must be set");
}
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
AMD64JavaCallWrapper jcw = (AMD64JavaCallWrapper) getEntryFrameCallWrapper();
if (Assert.ASSERTS_ENABLED) {
Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero");
Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack");
}
AMD64Frame fr;
if (jcw.getLastJavaPC() != null) {
fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC());
} else {
fr = new AMD64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP());
}
map.clear();
if (Assert.ASSERTS_ENABLED) {
Assert.that(map.getIncludeArgumentOops(), "should be set by clear");
}
return fr;
}
private Frame senderForInterpreterFrame(AMD64RegisterMap map) {
Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
// We do not need to update the callee-save register mapping because above
// us is either another interpreter frame or a converter-frame, but never
// directly a compiled frame.
// 11/24/04 SFG. This is no longer true after adapters were removed. However at the moment
// C2 no longer uses callee save register for java calls so there are no callee register
// to find.
return new AMD64Frame(sp, getLink(), unextendedSP.minus(sp));
}
private Frame senderForCompiledFrame(AMD64RegisterMap map, CodeBlob cb) {
//
// NOTE: some of this code is (unfortunately) duplicated in AMD64CurrentFrameGuess
//
if (Assert.ASSERTS_ENABLED) {
Assert.that(map != null, "map must be set");
}
// frame owned by optimizing compiler
Address sender_sp = null;
if (VM.getVM().isClientCompiler()) {
sender_sp = addressOfStackSlot(SENDER_SP_OFFSET);
} else {
if (Assert.ASSERTS_ENABLED) {
Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
}
sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
}
// On Intel the return_address is always the word on the stack
Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
if (map.getUpdateMap() && cb.getOopMaps() != null) {
OopMapSet.updateRegisterMap(this, cb, map, true);
}
if (VM.getVM().isClientCompiler()) {
// Move this here for C1 and collecting oops in arguments (According to Rene)
map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
}
Address saved_fp = null;
if (VM.getVM().isClientCompiler()) {
saved_fp = getFP().getAddressAt(0);
} else if (VM.getVM().isServerCompiler() &&
(VM.getVM().getInterpreter().contains(sender_pc) ||
VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
// C2 prologue saves EBP in the usual place.
// however only use it if the sender had link information in it.
saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
}
return new AMD64Frame(sender_sp, saved_fp, sender_pc);
}
protected boolean hasSenderPD() {
// FIXME
// Check for null ebp? Need to do some tests.
return true;
}
public long frameSize() {
return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize());
}
public Address getLink() {
return addressOfStackSlot(LINK_OFFSET).getAddressAt(0);
}
// FIXME: not implementable yet
//inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; }
public Address getUnextendedSP() { return raw_unextendedSP; }
// Return address:
public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); }
public Address getSenderPC() { return getSenderPCAddr().getAddressAt(0); }
// return address of param, zero origin index.
public Address getNativeParamAddr(int idx) {
return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx);
}
public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }
public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
throw new RuntimeException("Should not reach here");
}
return oopMapRegToLocation(reg, regMap);
}
public Address addressOfInterpreterFrameLocals() {
return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
}
private Address addressOfInterpreterFrameBCX() {
return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET);
}
public int getInterpreterFrameBCI() {
// FIXME: this is not atomic with respect to GC and is unsuitable
// for use in a non-debugging, or reflective, system. Need to
// figure out how to express this.
Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0);
OopHandle methodHandle = addressOfInterpreterFrameMethod().getOopHandleAt(0);
Method method = (Method) VM.getVM().getObjectHeap().newOop(methodHandle);
return (int) bcpToBci(bcp, method);
}
public Address addressOfInterpreterFrameMDX() {
return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET);
}
// FIXME
//inline int frame::interpreter_frame_monitor_size() {
// return BasicObjectLock::size();
//}
// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)
public Address addressOfInterpreterFrameExpressionStack() {
Address monitorEnd = interpreterFrameMonitorEnd().address();
return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize());
}
public int getInterpreterFrameExpressionStackDirection() { return -1; }
// top of expression stack
public Address addressOfInterpreterFrameTOS() {
return getSP();
}
/** Expression stack from top down */
public Address addressOfInterpreterFrameTOSAt(int slot) {
return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize());
}
public Address getInterpreterFrameSenderSP() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isInterpretedFrame(), "interpreted frame expected");
}
return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
}
// Monitors
public BasicObjectLock interpreterFrameMonitorBegin() {
return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET));
}
public BasicObjectLock interpreterFrameMonitorEnd() {
Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0);
if (Assert.ASSERTS_ENABLED) {
// make sure the pointer points inside the frame
Assert.that(AddressOps.gt(getFP(), result), "result must < than frame pointer");
Assert.that(AddressOps.lte(getSP(), result), "result must >= than stack pointer");
}
return new BasicObjectLock(result);
}
public int interpreterFrameMonitorSize() {
return BasicObjectLock.size();
}
// Method
public Address addressOfInterpreterFrameMethod() {
return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET);
}
// Constant pool cache
public Address addressOfInterpreterFrameCPCache() {
return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET);
}
// Entry frames
public JavaCallWrapper getEntryFrameCallWrapper() {
return new AMD64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0));
}
protected Address addressOfSavedOopResult() {
// offset is 2 for compiler2 and 3 for compiler1
return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) *
VM.getVM().getAddressSize());
}
protected Address addressOfSavedReceiver() {
return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize());
}
private void dumpStack() {
if (getFP() != null) {
for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
AddressOps.lte(addr, getFP().addOffsetTo(5 * VM.getVM().getAddressSize()));
addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
System.out.println(addr + ": " + addr.getAddressAt(0));
}
} else {
for (Address addr = getSP().addOffsetTo(-5 * VM.getVM().getAddressSize());
AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize()));
addr = addr.addOffsetTo(VM.getVM().getAddressSize())) {
System.out.println(addr + ": " + addr.getAddressAt(0));
}
}
}
}

View file

@@ -30,6 +30,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
@@ -80,11 +81,11 @@ public class LinuxAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
     if (fp == null) {
       return null; // no information
     }
-    return new AMD64Frame(thread.getLastJavaSP(), fp);
+    return new X86Frame(thread.getLastJavaSP(), fp);
   }

   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }

   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@@ -95,9 +96,9 @@ public class LinuxAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }

View file

@@ -30,6 +30,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.amd64.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.runtime.amd64.*;
+import sun.jvm.hotspot.runtime.x86.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
@@ -84,14 +85,14 @@ public class SolarisAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
     }
     Address pc = thread.getLastJavaPC();
     if ( pc != null ) {
-      return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
+      return new X86Frame(thread.getLastJavaSP(), fp, pc);
     } else {
-      return new AMD64Frame(thread.getLastJavaSP(), fp);
+      return new X86Frame(thread.getLastJavaSP(), fp);
     }
   }

   public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
-    return new AMD64RegisterMap(thread, updateMap);
+    return new X86RegisterMap(thread, updateMap);
   }

   public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@@ -102,9 +103,9 @@ public class SolarisAMD64JavaThreadPDAccess implements JavaThreadPDAccess {
       return null;
     }
     if (guesser.getPC() == null) {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP());
+      return new X86Frame(guesser.getSP(), guesser.getFP());
     } else {
-      return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
+      return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
     }
   }

View file

@@ -236,7 +236,7 @@ public class SPARCFrame extends Frame {
       CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
       if (cb != null && cb.isJavaMethod()) {
         NMethod nm = (NMethod) cb;
-        if (pc.equals(nm.deoptBegin())) {
+        if (pc.equals(nm.deoptHandlerBegin())) {
           // adjust pc if frame is deoptimized.
           pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset());
           deoptimized = true;
@@ -559,7 +559,6 @@ public class SPARCFrame extends Frame {
       }
     }

-    if (!VM.getVM().isCore()) {
       // Note: The version of this operation on any platform with callee-save
       // registers must update the register map (if not null).
       // In order to do this correctly, the various subtypes of
@@ -572,6 +571,7 @@ public class SPARCFrame extends Frame {
       // registers callee-saved, then we will have to copy over
       // the RegisterMap update logic from the Intel code.

+      if (isRicochetFrame()) return senderForRicochetFrame(map);
       // The constructor of the sender must know whether this frame is interpreted so it can set the
       // sender's _interpreter_sp_adjustment field.
@@ -584,24 +584,21 @@ public class SPARCFrame extends Frame {
         // supplied blob which is already known to be associated with this frame.
         cb = VM.getVM().getCodeCache().findBlob(pc);
         if (cb != null) {
-          if (cb.callerMustGCArguments(map.getThread())) {
-            map.setIncludeArgumentOops(true);
-          }
           // Update the location of all implicitly saved registers
           // as the address of these registers in the register save
           // area (for %o registers we use the address of the %i
           // register in the next younger frame)
           map.shiftWindow(sp, youngerSP);
           if (map.getUpdateMap()) {
+            if (cb.callerMustGCArguments()) {
+              map.setIncludeArgumentOops(true);
+            }
             if (cb.getOopMaps() != null) {
               OopMapSet.updateRegisterMap(this, cb, map, VM.getVM().isDebugging());
             }
           }
         }
       }
-    } // #ifndef CORE

     return new SPARCFrame(biasSP(sp), biasSP(youngerSP), isInterpreted);
   }
@@ -948,6 +945,20 @@ public class SPARCFrame extends Frame {
   }

+  private Frame senderForRicochetFrame(SPARCRegisterMap map) {
+    if (DEBUG) {
+      System.out.println("senderForRicochetFrame");
+    }
+    //RicochetFrame* f = RicochetFrame::from_frame(fr);
+    // Cf. is_interpreted_frame path of frame::sender
+    Address youngerSP = getSP();
+    Address sp = getSenderSP();
+    map.makeIntegerRegsUnsaved();
+    map.shiftWindow(sp, youngerSP);
+    boolean thisFrameAdjustedStack = true;  // I5_savedSP is live in this RF
+    return new SPARCFrame(sp, youngerSP, thisFrameAdjustedStack);
+  }
+
   private Frame senderForEntryFrame(RegisterMap regMap) {
     SPARCRegisterMap map = (SPARCRegisterMap) regMap;
@@ -965,10 +976,8 @@ public class SPARCFrame extends Frame {
     Address lastJavaPC = jcw.getLastJavaPC();
     map.clear();

-    if (!VM.getVM().isCore()) {
       map.makeIntegerRegsUnsaved();
       map.shiftWindow(lastJavaSP, null);
-    }

     if (Assert.ASSERTS_ENABLED) {
       Assert.that(map.getIncludeArgumentOops(), "should be set by clear");

View file

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime.sparc;
import java.util.*;
import sun.jvm.hotspot.asm.sparc.SPARCRegister;
import sun.jvm.hotspot.asm.sparc.SPARCRegisters;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class SPARCRicochetFrame {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private SPARCFrame frame;
private static void initialize(TypeDataBase db) {
// Type type = db.lookupType("MethodHandles::RicochetFrame");
}
static SPARCRicochetFrame fromFrame(SPARCFrame f) {
return new SPARCRicochetFrame(f);
}
private SPARCRicochetFrame(SPARCFrame f) {
frame = f;
}
private Address registerValue(SPARCRegister reg) {
return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0);
}
public Address savedArgsBase() {
return registerValue(SPARCRegisters.L4);
}
public Address exactSenderSP() {
return registerValue(SPARCRegisters.I5);
}
public Address senderLink() {
return frame.getSenderSP();
}
public Address senderPC() {
return frame.getSenderPC();
}
public Address extendedSenderSP() {
return savedArgsBase();
}
}
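For orientation, a hedged usage sketch in Java (a hypothetical helper, same package assumed; not part of this commit) showing the sender view a SPARC ricochet frame exposes:

    // Illustrative only: print the sender state recovered from a ricochet frame.
    static void printRicochetSender(SPARCFrame f) {
      SPARCRicochetFrame rf = SPARCRicochetFrame.fromFrame(f);
      System.out.println("exact sender SP (%i5):                " + rf.exactSenderSP());
      System.out.println("extended sender SP (saved args base): " + rf.extendedSenderSP());
      System.out.println("sender PC:                            " + rf.senderPC());
    }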

@ -31,6 +31,7 @@ import sun.jvm.hotspot.debugger.win32.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.runtime.amd64.*;
import sun.jvm.hotspot.runtime.x86.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
@ -86,14 +87,14 @@ public class Win32AMD64JavaThreadPDAccess implements JavaThreadPDAccess {
}
Address pc = thread.getLastJavaPC();
if ( pc != null ) {
return new AMD64Frame(thread.getLastJavaSP(), fp, pc);
return new X86Frame(thread.getLastJavaSP(), fp, pc);
} else {
return new AMD64Frame(thread.getLastJavaSP(), fp);
return new X86Frame(thread.getLastJavaSP(), fp);
}
}
public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
return new AMD64RegisterMap(thread, updateMap);
return new X86RegisterMap(thread, updateMap);
}
public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
@ -104,9 +105,9 @@ public class Win32AMD64JavaThreadPDAccess implements JavaThreadPDAccess {
return null;
}
if (guesser.getPC() == null) {
return new AMD64Frame(guesser.getSP(), guesser.getFP());
return new X86Frame(guesser.getSP(), guesser.getFP());
} else {
return new AMD64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
return new X86Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
}
}

@ -25,7 +25,6 @@
package sun.jvm.hotspot.runtime.x86;
import java.util.*;
import sun.jvm.hotspot.asm.x86.*;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.compiler.*;
import sun.jvm.hotspot.debugger.*;
@ -62,11 +61,13 @@ public class X86Frame extends Frame {
private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET;
// Entry frames
private static final int ENTRY_FRAME_CALL_WRAPPER_OFFSET = 2;
private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET;
// Native frames
private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2;
private static VMReg rbp;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@ -76,18 +77,22 @@ public class X86Frame extends Frame {
}
private static synchronized void initialize(TypeDataBase db) {
if (VM.getVM().isCore()) {
INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
} else {
INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1;
INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1;
}
INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1;
INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1;
INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1;
INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET;
ENTRY_FRAME_CALL_WRAPPER_OFFSET = db.lookupIntConstant("frame::entry_frame_call_wrapper_offset");
if (VM.getVM().getAddressSize() == 4) {
rbp = new VMReg(5);
} else {
rbp = new VMReg(5 << 1);
}
}
}
// an additional field beyond sp and pc:
Address raw_fp; // frame pointer
@ -102,7 +107,7 @@ public class X86Frame extends Frame {
CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
if (cb != null && cb.isJavaMethod()) {
NMethod nm = (NMethod) cb;
if (pc.equals(nm.deoptBegin())) {
if (pc.equals(nm.deoptHandlerBegin())) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
}
@ -119,6 +124,7 @@ public class X86Frame extends Frame {
this.raw_unextendedSP = raw_sp;
this.raw_fp = raw_fp;
this.pc = pc;
adjustUnextendedSP();
// Frame must be fully constructed before this call
adjustForDeopt();
@ -134,6 +140,7 @@ public class X86Frame extends Frame {
this.raw_unextendedSP = raw_sp;
this.raw_fp = raw_fp;
this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
adjustUnextendedSP();
// Frame must be fully constructed before this call
adjustForDeopt();
@ -144,24 +151,18 @@ public class X86Frame extends Frame {
}
}
// This constructor should really take the unextended SP as an arg
// but then the constructor is ambiguous with constructor that takes
// a PC so take an int and convert it.
public X86Frame(Address raw_sp, Address raw_fp, long extension) {
public X86Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) {
this.raw_sp = raw_sp;
if (raw_sp == null) {
this.raw_unextendedSP = null;
} else {
this.raw_unextendedSP = raw_sp.addOffsetTo(extension);
}
this.raw_unextendedSP = raw_unextendedSp;
this.raw_fp = raw_fp;
this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
this.pc = pc;
adjustUnextendedSP();
// Frame must be fully constructed before this call
adjustForDeopt();
if (DEBUG) {
System.out.println("X86Frame(sp, fp): " + this);
System.out.println("X86Frame(sp, unextendedSP, fp, pc): " + this);
dumpStack();
}
@ -172,7 +173,6 @@ public class X86Frame extends Frame {
frame.raw_sp = raw_sp;
frame.raw_unextendedSP = raw_unextendedSP;
frame.raw_fp = raw_fp;
frame.raw_fp = raw_fp;
frame.pc = pc;
frame.deoptimized = deoptimized;
return frame;
@ -269,8 +269,8 @@ public class X86Frame extends Frame {
if (isEntryFrame()) return senderForEntryFrame(map);
if (isInterpretedFrame()) return senderForInterpreterFrame(map);
if (isRicochetFrame()) return senderForRicochetFrame(map);
if (!VM.getVM().isCore()) {
if(cb == null) {
cb = VM.getVM().getCodeCache().findBlob(getPC());
} else {
@ -282,14 +282,26 @@ public class X86Frame extends Frame {
if (cb != null) {
return senderForCompiledFrame(map, cb);
}
}
// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
return new X86Frame(getSenderSP(), getLink(), getSenderPC());
}
private Frame senderForRicochetFrame(X86RegisterMap map) {
if (DEBUG) {
System.out.println("senderForRicochetFrame");
}
X86RicochetFrame f = X86RicochetFrame.fromFrame(this);
if (map.getUpdateMap())
updateMapWithSavedLink(map, f.senderLinkAddress());
return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC());
}
private Frame senderForEntryFrame(X86RegisterMap map) {
if (DEBUG) {
System.out.println("senderForEntryFrame");
}
if (Assert.ASSERTS_ENABLED) {
Assert.that(map != null, "map must be set");
}
@ -313,7 +325,37 @@ public class X86Frame extends Frame {
return fr;
}
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
private void adjustUnextendedSP() {
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
CodeBlob cb = cb();
NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
if (senderNm != null) {
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
if (senderNm.isDeoptMhEntry(getPC())) {
// DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
raw_unextendedSP = getFP();
}
else if (senderNm.isDeoptEntry(getPC())) {
// DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
}
else if (senderNm.isMethodHandleReturn(getPC())) {
raw_unextendedSP = getFP();
}
}
}
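A condensed Java sketch of the decision above, over plain longs (illustrative only; it assumes the NMethod predicates shown in this hunk, with fp standing in for the value read from the saved-FP slot):

    final class UnextendedSPAdjust {
      // At a compiled MethodHandle call site (deopt or normal return), the
      // "saved FP" slot really holds the caller's unextended SP; a plain
      // deopt entry needs no adjustment.
      static long adjustedUnextendedSP(long unextendedSP, long fp,
                                       boolean mhCallSite, boolean deoptEntry) {
        if (mhCallSite) return fp;
        return unextendedSP; // covers the deoptEntry and no-op cases
      }
    }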
private Frame senderForInterpreterFrame(X86RegisterMap map) {
if (DEBUG) {
System.out.println("senderForInterpreterFrame");
}
Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0);
Address sp = addressOfStackSlot(SENDER_SP_OFFSET);
// We do not need to update the callee-save register mapping because above
@ -323,10 +365,21 @@ public class X86Frame extends Frame {
// However c2 no longer uses callee save register for java calls so there
// are no callee register to find.
return new X86Frame(sp, getLink(), unextendedSP.minus(sp));
if (map.getUpdateMap())
updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET));
return new X86Frame(sp, unextendedSP, getLink(), getSenderPC());
}
private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) {
map.setLocation(rbp, savedFPAddr);
}
private Frame senderForCompiledFrame(X86RegisterMap map, CodeBlob cb) {
if (DEBUG) {
System.out.println("senderForCompiledFrame");
}
//
// NOTE: some of this code is (unfortunately) duplicated in X86CurrentFrameGuess
//
@ -336,41 +389,35 @@ public class X86Frame extends Frame {
}
// frame owned by optimizing compiler
Address sender_sp = null;
if (VM.getVM().isClientCompiler()) {
sender_sp = addressOfStackSlot(SENDER_SP_OFFSET);
} else {
if (Assert.ASSERTS_ENABLED) {
Assert.that(cb.getFrameSize() >= 0, "Compiled by Compiler1: do not use");
Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size");
}
sender_sp = getUnextendedSP().addOffsetTo(cb.getFrameSize());
}
Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize());
// On Intel the return_address is always the word on the stack
Address sender_pc = sender_sp.getAddressAt(-1 * VM.getVM().getAddressSize());
Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize());
if (map.getUpdateMap() && cb.getOopMaps() != null) {
// This is the saved value of EBP which may or may not really be an FP.
// It is only an FP if the sender is an interpreter frame (or C1?).
Address savedFPAddr = senderSP.addOffsetTo(- SENDER_SP_OFFSET * VM.getVM().getAddressSize());
if (map.getUpdateMap()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
map.setIncludeArgumentOops(cb.callerMustGCArguments());
if (cb.getOopMaps() != null) {
OopMapSet.updateRegisterMap(this, cb, map, true);
}
if (VM.getVM().isClientCompiler()) {
// Move this here for C1 and collecting oops in arguments (According to Rene)
map.setIncludeArgumentOops(cb.callerMustGCArguments(map.getThread()));
}
// Since the prolog does the save and restore of EBP there is no oopmap
// for it so we must fill in its location as if there was an oopmap entry
// since if our caller was compiled code there could be live jvm state in it.
updateMapWithSavedLink(map, savedFPAddr);
}
Address saved_fp = null;
if (VM.getVM().isClientCompiler()) {
saved_fp = getFP().getAddressAt(0);
} else if (VM.getVM().isServerCompiler() &&
(VM.getVM().getInterpreter().contains(sender_pc) ||
VM.getVM().getStubRoutines().returnsToCallStub(sender_pc))) {
// C2 prologue saves EBP in the usual place.
// However, only use it if the sender had link information in it.
saved_fp = sender_sp.getAddressAt(-2 * VM.getVM().getAddressSize());
}
return new X86Frame(sender_sp, saved_fp, sender_pc);
return new X86Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC);
}
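A self-contained Java sketch of the sender-frame arithmetic used above, with plain longs standing in for the SA Address type (the class name and the 64-bit word size are assumptions, not part of this commit):

    // senderSP sits one full frame above the callee's unextended SP; the
    // return address and the saved EBP are the two words just below it
    // (SENDER_SP_OFFSET == 2 on x86).
    final class X86SenderMath {
      static final int WORD = 8; // assuming a 64-bit VM
      static long senderSP(long unextendedSP, long frameSizeInBytes) {
        return unextendedSP + frameSizeInBytes;
      }
      static long senderPCSlot(long senderSP) { return senderSP - 1 * WORD; }
      static long savedFPSlot(long senderSP)  { return senderSP - 2 * WORD; }
    }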
protected boolean hasSenderPD() {
@ -403,14 +450,6 @@ public class X86Frame extends Frame {
public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); }
public Address compiledArgumentToLocationPD(VMReg reg, RegisterMap regMap, int argSize) {
if (VM.getVM().isCore() || VM.getVM().isClientCompiler()) {
throw new RuntimeException("Should not reach here");
}
return oopMapRegToLocation(reg, regMap);
}
public Address addressOfInterpreterFrameLocals() {
return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET);
}

@ -0,0 +1,81 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime.x86;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class X86RicochetFrame extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("MethodHandles::RicochetFrame");
senderLinkField = type.getAddressField("_sender_link");
savedArgsBaseField = type.getAddressField("_saved_args_base");
exactSenderSPField = type.getAddressField("_exact_sender_sp");
senderPCField = type.getAddressField("_sender_pc");
}
private static AddressField senderLinkField;
private static AddressField savedArgsBaseField;
private static AddressField exactSenderSPField;
private static AddressField senderPCField;
static X86RicochetFrame fromFrame(X86Frame f) {
return new X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset()));
}
private X86RicochetFrame(Address addr) {
super(addr);
}
public Address senderLink() {
return senderLinkField.getValue(addr);
}
public Address senderLinkAddress() {
return addr.addOffsetTo(senderLinkField.getOffset());
}
public Address savedArgsBase() {
return savedArgsBaseField.getValue(addr);
}
public Address extendedSenderSP() {
return savedArgsBase();
}
public Address exactSenderSP() {
return exactSenderSPField.getValue(addr);
}
public Address senderPC() {
return senderPCField.getValue(addr);
}
}
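A minimal Java sketch of the rebasing fromFrame() performs (illustrative; the real offset comes from the type database at runtime): the VMObject is anchored where the C++ MethodHandles::RicochetFrame struct begins, i.e. the frame pointer minus _sender_link's offset within the struct.

    final class RicochetFrameMath {
      // matches: f.getFP().addOffsetTo(- senderLinkField.getOffset())
      static long ricochetFrameBase(long fp, long senderLinkOffsetInStruct) {
        return fp - senderLinkOffsetInStruct;
      }
    }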

@ -244,24 +244,6 @@ SUNWprivate_1.1 {
JVM_Yield;
JVM_handle_linux_signal;
# Old reflection routines
# These do not need to be present in the product build in JDK 1.4
# but their code has not been removed yet because there will not
# be a substantial code savings until JVM_InvokeMethod and
# JVM_NewInstanceFromConstructor can also be removed; see
# reflectionCompat.hpp.
JVM_GetClassConstructor;
JVM_GetClassConstructors;
JVM_GetClassField;
JVM_GetClassFields;
JVM_GetClassMethod;
JVM_GetClassMethods;
JVM_GetField;
JVM_GetPrimitiveField;
JVM_NewInstance;
JVM_SetField;
JVM_SetPrimitiveField;
# debug JVM
JVM_AccessVMBooleanFlag;
JVM_AccessVMIntFlag;

@ -244,24 +244,6 @@ SUNWprivate_1.1 {
JVM_Yield;
JVM_handle_linux_signal;
# Old reflection routines
# These do not need to be present in the product build in JDK 1.4
# but their code has not been removed yet because there will not
# be a substantial code savings until JVM_InvokeMethod and
# JVM_NewInstanceFromConstructor can also be removed; see
# reflectionCompat.hpp.
JVM_GetClassConstructor;
JVM_GetClassConstructors;
JVM_GetClassField;
JVM_GetClassFields;
JVM_GetClassMethod;
JVM_GetClassMethods;
JVM_GetField;
JVM_GetPrimitiveField;
JVM_NewInstance;
JVM_SetField;
JVM_SetPrimitiveField;
# miscellaneous functions
jio_fprintf;
jio_printf;

@ -41,8 +41,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@ -107,8 +107,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@ -44,8 +44,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Linker mapfiles
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@ -1,48 +0,0 @@
#
#
# Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Define public interface.
SUNWprivate_1.1 {
global:
# Old reflection routines
# These do not need to be present in the product build in JDK 1.4
# but their code has not been removed yet because there will not
# be a substantial code savings until JVM_InvokeMethod and
# JVM_NewInstanceFromConstructor can also be removed; see
# reflectionCompat.hpp.
JVM_GetClassConstructor;
JVM_GetClassConstructors;
JVM_GetClassField;
JVM_GetClassFields;
JVM_GetClassMethod;
JVM_GetClassMethods;
JVM_GetField;
JVM_GetPrimitiveField;
JVM_NewInstance;
JVM_SetField;
JVM_SetPrimitiveField;
};

@ -48,9 +48,7 @@ endif # Platform_compiler == sparcWorks
CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Linker mapfiles
# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.

@ -58,13 +58,9 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# to inhibit the effect of the previous line on CFLAGS.
# Linker mapfiles
# NOTE: inclusion of nonproduct mapfile not necessary; read it for details
ifdef USE_GCC
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers
else
MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
$(GAMMADIR)/make/solaris/makefiles/mapfile-vers-nonproduct
ifndef USE_GCC
# This mapfile is only needed when compiling with dtrace support,
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)

@ -1794,7 +1794,8 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
set((intptr_t)real_msg, O1);
// Size of set() should stay the same
patchable_set((intptr_t)real_msg, O1);
// Load address to call to into O7
load_ptr_contents(a, O7);
// Register call to verify_oop_subroutine
@ -1831,7 +1832,8 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char
ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
set((intptr_t)real_msg, O1);
// Size of set() should stay the same
patchable_set((intptr_t)real_msg, O1);
// Load address to call to into O7
load_ptr_contents(a, O7);
// Register call to verify_oop_subroutine
@ -1976,7 +1978,8 @@ void MacroAssembler::stop(const char* msg) {
save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
// stop_subroutine expects message pointer in I1.
set((intptr_t)msg, O1);
// Size of set() should stay the same
patchable_set((intptr_t)msg, O1);
// factor long stop-sequence into subroutine to save space
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
@ -1998,7 +2001,8 @@ void MacroAssembler::warn(const char* msg) {
save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
RegistersForDebugging::save_registers(this);
mov(O0, L0);
set((intptr_t)msg, O0);
// Size of set() should stay the same
patchable_set((intptr_t)msg, O0);
call( CAST_FROM_FN_PTR(address, warning) );
delayed()->nop();
// ret();
@ -4901,3 +4905,65 @@ void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
// Caller should set it:
// add(G0, 1, result); // equals
}
// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
Register end = count;
int cache_line_size = VM_Version::prefetch_data_size();
// Minimum count when BIS zeroing can be used since
// it needs membar which is expensive.
int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
Label small_loop;
// Check if count is negative (dead code) or zero.
// Note: count is 64 bits in a 64-bit VM.
cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
// Use BIS zeroing only for big arrays since it requires membar.
if (Assembler::is_simm13(block_zero_size)) { // < 4096
cmp(count, block_zero_size);
} else {
set(block_zero_size, temp);
cmp(count, temp);
}
br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
delayed()->add(to, count, end);
// Note: size is >= three (32 bytes) cache lines.
// Clean the beginning of space up to next cache line.
for (int offs = 0; offs < cache_line_size; offs += 8) {
stx(G0, to, offs);
}
// align to next cache line
add(to, cache_line_size, to);
and3(to, -cache_line_size, to);
// Note: size left >= two (32 bytes) cache lines.
// BIS should not be used to zero tail (64 bytes)
// to avoid zeroing a header of the following object.
sub(end, (cache_line_size*2)-8, end);
Label bis_loop;
bind(bis_loop);
stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
add(to, cache_line_size, to);
cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
// BIS needs membar.
membar(Assembler::StoreLoad);
add(end, (cache_line_size*2)-8, end); // restore end
cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
// Clean the tail.
bind(small_loop);
stx(G0, to, 0);
add(to, 8, to);
cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
nop(); // Separate short branches
}
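A self-contained Java sketch of the size gating and cache-line alignment the routine above performs (the 64-byte line size and the names are assumptions for illustration):

    final class BisZeroMath {
      static final int CACHE_LINE = 64; // assumed prefetch_data_size()
      // BIS needs a trailing membar, so only zero big blocks with it.
      static int blockZeroSize(int blockZeroingLowLimit) {
        return Math.max(CACHE_LINE * 3, blockZeroingLowLimit);
      }
      // Matches: add(to, cache_line_size, to); and3(to, -cache_line_size, to);
      // i.e. skip to the next cache-line boundary strictly above 'to'.
      static long alignToNextCacheLine(long to) {
        return (to + CACHE_LINE) & -CACHE_LINE;
      }
    }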

@ -886,6 +886,7 @@ class Assembler : public AbstractAssembler {
enum ASIs { // page 72, v9
ASI_PRIMARY = 0x80,
ASI_PRIMARY_NOFAULT = 0x82,
ASI_PRIMARY_LITTLE = 0x88,
// Block initializing store
ASI_ST_BLKINIT_PRIMARY = 0xE2,
@ -1786,9 +1787,12 @@ public:
rs1(s) |
op3(wrreg_op3) |
u_field(2, 29, 25) |
u_field(1, 13, 13) |
immed(true) |
simm(simm13a, 13)); }
inline void wrasi(Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
// wrasi(d, imm) stores (d xor imm) to asi
inline void wrasi(Register d, int simm13a) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) |
u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
@ -2625,6 +2629,8 @@ public:
void char_arrays_equals(Register ary1, Register ary2,
Register limit, Register result,
Register chr1, Register chr2, Label& Ldone);
// Use BIS for zeroing
void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
#undef VIRTUAL
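On SPARC V9 the WR-state-register instructions write rs1 XOR operand, which is why the comment above reads "stores (d xor imm) to asi"; passing G0 (always zero) as rs1 writes the immediate itself. A one-line Java sketch of that semantics:

    final class WrasiMath {
      static int wrasiValue(int rs1Value, int simm13) {
        return rs1Value ^ simm13; // with rs1 == G0 (0), this is just simm13
      }
    }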

@ -142,11 +142,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
}
LIR_Opr LIR_Assembler::incomingReceiverOpr() {
return FrameMap::I0_oop_opr;
}
LIR_Opr LIR_Assembler::osrBufferPointer() {
return FrameMap::I0_opr;
}

@ -782,13 +782,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
case jvmti_exception_throw_id:
{ // Oexception : exception
__ set_info("jvmti_exception_throw", dont_gc_arguments);
oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
}
break;
case dtrace_object_alloc_id:
{ // O0: object
__ set_info("dtrace_object_alloc", dont_gc_arguments);

@ -156,9 +156,16 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#endif // _LP64
}
typedef void (*_zero_Fn)(HeapWord* to, size_t count);
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
if (value == 0 && UseBlockZeroing &&
(count > (BlockZeroingLowLimit >> LogHeapWordSize))) {
// Call it only when block zeroing is used
((_zero_Fn)StubRoutines::zero_aligned_words())(tohw, count);
} else {
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
// If count is odd, odd will be equal to 1 on 32-bit platform
@ -177,6 +184,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
}
}
}
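A hedged Java restatement of the dispatch above (parameter names are illustrative): the zeroing stub is only worth its trailing membar when the fill value is zero, block zeroing is enabled, and the request exceeds BlockZeroingLowLimit, which is in bytes while count is in HeapWords, hence the shift.

    final class ZeroStubGate {
      static boolean useZeroStub(boolean useBlockZeroing, long value,
                                 long countWords, long lowLimitBytes,
                                 int logHeapWordSize) {
        return value == 0 && useBlockZeroing
            && countWords > (lowLimitBytes >> logHeapWordSize);
      }
    }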
static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
(void)memset(to, value, count);

@ -259,13 +259,8 @@
};
#endif /* CC_INTERP */
// the compiler frame has many of the same fields as the interpreter frame
// %%%%% factor out declarations of the shared fields
enum compiler_frame_fixed_locals {
compiler_frame_d_scratch_fp_offset = -2,
compiler_frame_vm_locals_fp_offset = -2, // should be same as above
compiler_frame_vm_local_words = -compiler_frame_vm_locals_fp_offset
compiler_frame_vm_locals_fp_offset = -2
};
private:
@ -283,9 +278,6 @@
inline void interpreter_frame_set_tos_address(intptr_t* x);
// %%%%% Another idea: instead of defining 3 fns per item, just define one returning a ref
// monitors:
// next two fns read and write Lmonitors value,
@ -298,22 +290,8 @@
return ((interpreterState)sp_at(interpreter_state_ptr_offset));
}
#endif /* CC_INTERP */
// Compiled frames
public:
// Tells if this register can hold 64 bits on V9 (really, V8+).
static bool holds_a_doubleword(Register reg) {
#ifdef _LP64
// return true;
return reg->is_out() || reg->is_global();
#else
return reg->is_out() || reg->is_global();
#endif
}
#endif // CPU_SPARC_VM_FRAME_SPARC_HPP

@ -1262,6 +1262,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
}
break;
case _adapter_opt_profiling:
if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
__ ld(G3_mh_vmcount, O1_scratch);
__ add(O1_scratch, 1, O1_scratch);
__ st(O1_scratch, G3_mh_vmcount);
}
// fall through
case _adapter_retype_only:
case _adapter_retype_raw:
// Immediately jump to the next MH layer:

@ -460,6 +460,8 @@ source_hpp %{
// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );
extern bool use_block_zeroing(Node* count);
// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
@ -521,6 +523,12 @@ bool can_branch_register( Node *bol, Node *cmp ) {
return false;
}
bool use_block_zeroing(Node* count) {
// Use BIS for zeroing if count is not constant
// or it is >= BlockZeroingLowLimit.
return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
}
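A hedged Java sketch of the predicate's trick: find_intptr_t_con returns the node's constant when it has one and the supplied default otherwise, so an unknown (non-constant) count defaults to BlockZeroingLowLimit and the test passes.

    final class BlockZeroingPredicate {
      static boolean useBlockZeroing(boolean useBlockZeroingFlag,
                                     Long constCount /* null if non-constant */,
                                     long blockZeroingLowLimit) {
        long c = (constCount != null) ? constCount : blockZeroingLowLimit;
        return useBlockZeroingFlag && c >= blockZeroingLowLimit;
      }
    }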
// ****************************************************************************
// REQUIRED FUNCTIONALITY
@ -832,6 +840,7 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
!(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
!(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
!(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
!(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
@ -2810,25 +2819,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
__ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
%}
// Compiler ensures base is doubleword aligned and cnt is count of doublewords
enc_class enc_Clear_Array(iRegX cnt, iRegP base, iRegX temp) %{
MacroAssembler _masm(&cbuf);
Register nof_bytes_arg = reg_to_register_object($cnt$$reg);
Register nof_bytes_tmp = reg_to_register_object($temp$$reg);
Register base_pointer_arg = reg_to_register_object($base$$reg);
Label loop;
__ mov(nof_bytes_arg, nof_bytes_tmp);
// Loop and clear, walking backwards through the array.
// nof_bytes_tmp (if >0) is always the number of bytes to zero
__ bind(loop);
__ deccc(nof_bytes_tmp, 8);
__ br(Assembler::greaterEqual, true, Assembler::pt, loop);
__ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
// %%%% this mini-loop must not cross a cache boundary!
%}
enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
Label Ldone, Lloop;
@ -10257,9 +10247,9 @@ instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o
ins_pipe(long_memory_op);
%}
// Count and Base registers are fixed because the allocator cannot
// kill unknown registers. The encodings are generic.
// The encodings are generic.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
predicate(!use_block_zeroing(n->in(2)) );
match(Set dummy (ClearArray cnt base));
effect(TEMP temp, KILL ccr);
ins_cost(300);
@ -10267,7 +10257,71 @@ instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg
"loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
" BRge loop\t\t! Clearing loop\n"
" STX G0,[$base+$temp]\t! delay slot" %}
ins_encode( enc_Clear_Array(cnt, base, temp) );
ins_encode %{
// Compiler ensures base is doubleword aligned and cnt is count of doublewords
Register nof_bytes_arg = $cnt$$Register;
Register nof_bytes_tmp = $temp$$Register;
Register base_pointer_arg = $base$$Register;
Label loop;
__ mov(nof_bytes_arg, nof_bytes_tmp);
// Loop and clear, walking backwards through the array.
// nof_bytes_tmp (if >0) is always the number of bytes to zero
__ bind(loop);
__ deccc(nof_bytes_tmp, 8);
__ br(Assembler::greaterEqual, true, Assembler::pt, loop);
__ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
// %%%% this mini-loop must not cross a cache boundary!
%}
ins_pipe(long_memory_op);
%}
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
predicate(use_block_zeroing(n->in(2)));
match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL ccr);
ins_cost(300);
format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
ins_encode %{
assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
Register to = $base$$Register;
Register count = $cnt$$Register;
Label Ldone;
__ nop(); // Separate short branches
// Use BIS for zeroing (temp is not used).
__ bis_zeroing(to, count, G0, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
match(Set dummy (ClearArray cnt base));
effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
ins_cost(300);
format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
ins_encode %{
assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
Register to = $base$$Register;
Register count = $cnt$$Register;
Register temp = $tmp$$Register;
Label Ldone;
__ nop(); // Separate short branches
// Use BIS for zeroing
__ bis_zeroing(to, count, temp, Ldone);
__ bind(Ldone);
%}
ins_pipe(long_memory_op);
%}
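A short Java sketch of why clear_array_bis_2 exists (illustrative): SPARC compare instructions can encode the BlockZeroingLowLimit threshold as an immediate only if it fits in a signed 13-bit field; otherwise the limit must first be materialized in a temp register.

    final class Simm13Check {
      static boolean fitsSimm13(int v) {
        return v >= -4096 && v <= 4095; // signed 13-bit immediate range
      }
    }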

@ -1124,6 +1124,126 @@ class StubGenerator: public StubCodeGenerator {
}
}
//
// Generate main code for disjoint arraycopy
//
typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
Label& L_loop, bool use_prefetch, bool use_bis);
void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
int iter_size, CopyLoopFunc copy_loop_func) {
Label L_copy;
assert(log2_elem_size <= 3, "the following code should be changed");
int count_dec = 16>>log2_elem_size;
int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
assert(prefetch_dist < 4096, "invalid value");
prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
if (UseBlockCopy) {
Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;
// 64 bytes tail + bytes copied in one loop iteration
int tail_size = 64 + iter_size;
int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
// Use BIS copy only for big arrays since it requires membar.
__ set(block_copy_count, O4);
__ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
// This code is for disjoint source and destination:
// to <= from || to >= from+count
// but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
__ sub(from, to, O4);
__ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
__ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);
__ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
// BIS should not be used to copy tail (64 bytes+iter_size)
// to avoid zeroing of following values.
__ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0
if (prefetch_count > 0) { // rounded up to one iteration count
// Do prefetching only if copy size is bigger
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
__ sub(count, prefetch_count, count);
(this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
__ add(count, prefetch_count, count); // restore count
} // prefetch_count > 0
(this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
__ add(count, (tail_size>>log2_elem_size), count); // restore count
__ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
// BIS needs membar.
__ membar(Assembler::StoreLoad);
// Copy tail
__ ba_short(L_copy);
__ BIND(L_skip_block_copy);
} // UseBlockCopy
if (prefetch_count > 0) { // rounded up to one iteration count
// Do prefetching only if copy size is bigger
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
__ sub(count, prefetch_count, count);
Label L_copy_prefetch;
(this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
__ add(count, prefetch_count, count); // restore count
} // prefetch_count > 0
(this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
}
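A self-contained Java sketch of the round-up applied to the prefetch distance above: it is raised to the next multiple of one loop iteration's copy size (iter_size is a power of two here).

    final class PrefetchMath {
      static int roundUpToIterSize(int prefetchDist, int iterSize) {
        return (prefetchDist + (iterSize - 1)) & (-iterSize);
      }
    }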
//
// Helper methods for copy_16_bytes_forward_with_shift()
//
void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
Label& L_loop, bool use_prefetch, bool use_bis) {
const Register left_shift = G1; // left shift bit counter
const Register right_shift = G5; // right shift bit counter
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if (use_prefetch) {
if (ArraycopySrcPrefetchDistance > 0) {
__ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
}
if (ArraycopyDstPrefetchDistance > 0) {
__ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
}
}
__ ldx(from, 0, O4);
__ ldx(from, 8, G4);
__ inc(to, 16);
__ inc(from, 16);
__ deccc(count, count_dec); // Can we do next iteration after this one?
__ srlx(O4, right_shift, G3);
__ bset(G3, O3);
__ sllx(O4, left_shift, O4);
__ srlx(G4, right_shift, G3);
__ bset(G3, O4);
if (use_bis) {
__ stxa(O3, to, -16);
__ stxa(O4, to, -8);
} else {
__ stx(O3, to, -16);
__ stx(O4, to, -8);
}
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->sllx(G4, left_shift, O3);
}
// Copy big chunks forward with shift
//
@ -1135,8 +1255,10 @@ class StubGenerator: public StubCodeGenerator {
// L_copy_bytes - copy exit label
//
void copy_16_bytes_forward_with_shift(Register from, Register to,
Register count, int count_dec, Label& L_copy_bytes) {
Register count, int log2_elem_size, Label& L_copy_bytes) {
Label L_loop, L_aligned_copy, L_copy_last_bytes;
Label L_aligned_copy, L_copy_last_bytes;
assert(log2_elem_size <= 3, "the following code should be changed");
int count_dec = 16>>log2_elem_size;
// if both arrays have the same alignment mod 8, do 8 bytes aligned copy
__ andcc(from, 7, G1); // misaligned bytes
@ -1154,27 +1276,13 @@ class StubGenerator: public StubCodeGenerator {
// Load 2 aligned 8-bytes chunks and use one from previous iteration
// to form 2 aligned 8-bytes chunks to store.
//
__ deccc(count, count_dec); // Pre-decrement 'count'
__ dec(count, count_dec); // Pre-decrement 'count'
__ andn(from, 7, from); // Align address
__ ldx(from, 0, O3);
__ inc(from, 8);
__ align(OptoLoopAlignment);
__ BIND(L_loop);
__ ldx(from, 0, O4);
__ deccc(count, count_dec); // Can we do next iteration after this one?
__ ldx(from, 8, G4);
__ inc(to, 16);
__ inc(from, 16);
__ sllx(O3, left_shift, O3);
__ srlx(O4, right_shift, G3);
__ bset(G3, O3);
disjoint_copy_core(from, to, count, log2_elem_size, 16, copy_16_bytes_shift_loop);
__ stx(O3, to, -16);
__ sllx(O4, left_shift, O4);
__ srlx(G4, right_shift, G3);
__ bset(G3, O4);
__ stx(O4, to, -8);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->mov(G4, O3);
__ inccc(count, count_dec>>1 ); // + 8 bytes
__ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
@ -1184,7 +1292,6 @@ class StubGenerator: public StubCodeGenerator {
__ ldx(from, 0, O4);
__ inc(to, 8);
__ inc(from, 8);
__ sllx(O3, left_shift, O3);
__ srlx(O4, right_shift, G3);
__ bset(O3, G3);
__ stx(G3, to, -8);
@ -1348,7 +1455,7 @@ class StubGenerator: public StubCodeGenerator {
// The compare above (count >= 23) guarantees 'count' >= 16 bytes.
// Also jump over aligned copy after the copy with shift completed.
copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
}
// Both arrays are 8 bytes aligned, copy 16 bytes at a time
@ -1576,7 +1683,7 @@ class StubGenerator: public StubCodeGenerator {
// The compare above (count >= 11) guarantees 'count' >= 16 bytes.
// Also jump over aligned copy after the copy with shift completed.
copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
}
// Both arrays are 8 bytes aligned, copy 16 bytes at a time
@ -1949,6 +2056,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
//
// Helper methods for generate_disjoint_int_copy_core()
//
void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
Label& L_loop, bool use_prefetch, bool use_bis) {
__ align(OptoLoopAlignment);
__ BIND(L_loop);
if (use_prefetch) {
if (ArraycopySrcPrefetchDistance > 0) {
__ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
}
if (ArraycopyDstPrefetchDistance > 0) {
__ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
}
}
__ ldx(from, 4, O4);
__ ldx(from, 12, G4);
__ inc(to, 16);
__ inc(from, 16);
__ deccc(count, 4); // Can we do next iteration after this one?
__ srlx(O4, 32, G3);
__ bset(G3, O3);
__ sllx(O4, 32, O4);
__ srlx(G4, 32, G3);
__ bset(G3, O4);
if (use_bis) {
__ stxa(O3, to, -16);
__ stxa(O4, to, -8);
} else {
__ stx(O3, to, -16);
__ stx(O4, to, -8);
}
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->sllx(G4, 32, O3);
}
//
// Generate core code for disjoint int copy (and oop copy on 32-bit).
// If "aligned" is true, the "from" and "to" addresses are assumed
@ -1962,7 +2108,7 @@ class StubGenerator: public StubCodeGenerator {
void generate_disjoint_int_copy_core(bool aligned) {
Label L_skip_alignment, L_aligned_copy;
Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
const Register from = O0; // source array address
const Register to = O1; // destination array address
@ -2013,30 +2159,16 @@ class StubGenerator: public StubCodeGenerator {
// copy with shift 4 elements (16 bytes) at a time
__ dec(count, 4); // The cmp at the beginning guarantees count >= 4
__ align(OptoLoopAlignment);
__ BIND(L_copy_16_bytes);
__ ldx(from, 4, O4);
__ deccc(count, 4); // Can we do next iteration after this one?
__ ldx(from, 12, G4);
__ inc(to, 16);
__ inc(from, 16);
__ sllx(O3, 32, O3);
__ srlx(O4, 32, G3);
__ bset(G3, O3);
disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);
__ stx(O3, to, -16);
__ sllx(O4, 32, O4);
__ srlx(G4, 32, G3);
__ bset(G3, O4);
__ stx(O4, to, -8);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
__ delayed()->mov(G4, O3);
__ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
__ delayed()->inc(count, 4); // restore 'count'
__ BIND(L_aligned_copy);
}
} // !aligned
// copy 4 elements (16 bytes) at a time
__ and3(count, 1, G4); // Save
__ srl(count, 1, count);
@ -2222,6 +2354,38 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
//
// Helper methods for generate_disjoint_long_copy_core()
//
void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
Label& L_loop, bool use_prefetch, bool use_bis) {
__ align(OptoLoopAlignment);
__ BIND(L_loop);
for (int off = 0; off < 64; off += 16) {
if (use_prefetch && (off & 31) == 0) {
if (ArraycopySrcPrefetchDistance > 0) {
__ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
}
if (ArraycopyDstPrefetchDistance > 0) {
__ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
}
}
__ ldx(from, off+0, O4);
__ ldx(from, off+8, O5);
if (use_bis) {
__ stxa(O4, to, off+0);
__ stxa(O5, to, off+8);
} else {
__ stx(O4, to, off+0);
__ stx(O5, to, off+8);
}
}
__ deccc(count, 8);
__ inc(from, 64);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->inc(to, 64);
}
//
// Generate core code for disjoint long copy (and oop copy on 64-bit).
// "aligned" is ignored, because we must make the stronger
@ -2267,7 +2431,7 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->add(offset0, 8, offset8); __ delayed()->add(offset0, 8, offset8);
// Copy by 64 bytes chunks // Copy by 64 bytes chunks
Label L_copy_64_bytes;
const Register from64 = O3; // source address const Register from64 = O3; // source address
const Register to64 = G3; // destination address const Register to64 = G3; // destination address
__ subcc(count, 6, O3); __ subcc(count, 6, O3);
@ -2275,24 +2439,14 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->mov(to, to64); __ delayed()->mov(to, to64);
// Now we can use O4(offset0), O5(offset8) as temps // Now we can use O4(offset0), O5(offset8) as temps
__ mov(O3, count); __ mov(O3, count);
// count >= 0 (original count - 8)
__ mov(from, from64); __ mov(from, from64);
__ align(OptoLoopAlignment); disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop);
__ BIND(L_copy_64_bytes);
for( int off = 0; off < 64; off += 16 ) {
__ ldx(from64, off+0, O4);
__ ldx(from64, off+8, O5);
__ stx(O4, to64, off+0);
__ stx(O5, to64, off+8);
}
__ deccc(count, 8);
__ inc(from64, 64);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
__ delayed()->inc(to64, 64);
// Restore O4(offset0), O5(offset8)
__ sub(from64, from, offset0);
__ inccc(count, 6); __ inccc(count, 6); // restore count
__ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
__ delayed()->add(offset0, 8, offset8);
@@ -3069,6 +3223,34 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
//
// Generate stub for heap zeroing.
// "to" address is aligned to jlong (8 bytes).
//
// Arguments for generated stub:
// to: O0
// count: O1 treated as signed (count of HeapWord)
// count could be 0
//
address generate_zero_aligned_words(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
const Register to = O0; // source array address
const Register count = O1; // HeapWords count
const Register temp = O2; // scratch
Label Ldone;
__ sllx(count, LogHeapWordSize, count); // to bytes count
// Use BIS for zeroing
__ bis_zeroing(to, count, temp, Ldone);
__ bind(Ldone);
__ retl();
__ delayed()->nop();
return start;
}
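For clarity, the stub's contract in plain C++ (a sketch, not the generated code: memset stands in for MacroAssembler::bis_zeroing, which emits block-initializing stores plus a tail fixup):

    #include <stdint.h>
    #include <string.h>

    // 'to' is jlong-aligned, 'count' is in HeapWords and may be zero.
    static void zero_aligned_words_model(intptr_t* to, intptr_t count) {
      size_t bytes = (size_t)count << 3;  // count << LogHeapWordSize on a 64-bit VM
      memset(to, 0, bytes);               // models the BIS zeroing
    }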
void generate_arraycopy_stubs() {
address entry;
address entry_jbyte_arraycopy;
@@ -3195,6 +3377,10 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
if (UseBlockZeroing) {
StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
}
}
void generate_initial() {
View file
@@ -3374,7 +3374,7 @@ void TemplateTable::_new() {
if(UseTLAB) {
Register RoldTopValue = RallocatedObject;
Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch; Register RtlabWasteLimitValue = G3_scratch;
Register RnewTopValue = G1_scratch;
Register RendValue = Rscratch;
Register RfreeValue = RnewTopValue;
@@ -3455,7 +3455,11 @@ void TemplateTable::_new() {
__ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
// initialize remaining object fields
{ Label loop; if (UseBlockZeroing) {
// Use BIS for zeroing
__ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
} else {
Label loop;
__ subcc(Roffset, wordSize, Roffset);
__ bind(loop);
//__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
View file
@@ -75,6 +75,24 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
}
if (has_v9()) {
assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
if (ArraycopySrcPrefetchDistance >= 4096)
ArraycopySrcPrefetchDistance = 4064;
assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
if (ArraycopyDstPrefetchDistance >= 4096)
ArraycopyDstPrefetchDistance = 4064;
} else {
if (ArraycopySrcPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
}
if (ArraycopyDstPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
}
}
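The 4096 bound presumably keeps the distance encodable in the prefetch instruction's immediate displacement field; a compact sketch of the sanitation above (hypothetical helper, the 4064 fallback is taken straight from the code):

    static long sanitize_prefetch_distance(long dist, bool has_prefetch) {
      if (!has_prefetch) return 0;     // pre-v9 CPUs: no prefetch instructions
      if (dist >= 4096)  return 4064;  // keep the displacement in range
      return dist;
    }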
UseSSE = 0; // Only on x86 and x64
_supports_cx8 = has_v9();
@@ -170,6 +188,26 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCBCond, false);
}
assert(BlockZeroingLowLimit > 0, "invalid value");
if (has_block_zeroing()) {
if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
FLAG_SET_DEFAULT(UseBlockZeroing, true);
}
} else if (UseBlockZeroing) {
warning("BIS zeroing instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseBlockZeroing, false);
}
assert(BlockCopyLowLimit > 0, "invalid value");
if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache
if (FLAG_IS_DEFAULT(UseBlockCopy)) {
FLAG_SET_DEFAULT(UseBlockCopy, true);
}
} else if (UseBlockCopy) {
warning("BIS instructions are not available or expensive on this CPU");
FLAG_SET_DEFAULT(UseBlockCopy, false);
}
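Both blocks follow the same flag policy; as a hedged sketch (hypothetical helper, not a HotSpot API):

    // Enable a feature by default only when the hardware supports it and the
    // user left the flag at its default; if the user forced it on unsupported
    // hardware, warn (as above) and force it off.
    static bool resolve_cpu_feature_flag(bool supported, bool user_set, bool requested) {
      if (supported) return user_set ? requested : true;
      return false;  // never leave it on without hardware support
    }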
#ifdef COMPILER2
// T4 and newer Sparc cpus have fast RDPC.
if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
View file
@@ -135,8 +135,8 @@ public:
// T4 and newer Sparc have fast RDPC instruction.
static bool has_fast_rdpc() { return is_T4(); }
// T4 and newer Sparc have Most-Recently-Used (MRU) BIS. // On T4 and newer Sparc BIS to the beginning of cache line always zeros it.
static bool has_mru_blk_init() { return has_blk_init() && is_T4(); } static bool has_block_zeroing() { return has_blk_init() && is_T4(); }
static const char* cpu_features() { return _features_str; }
View file
@@ -129,10 +129,6 @@ LIR_Opr LIR_Assembler::receiverOpr() {
return FrameMap::receiver_opr;
}
LIR_Opr LIR_Assembler::incomingReceiverOpr() {
return receiverOpr();
}
LIR_Opr LIR_Assembler::osrBufferPointer() {
return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
@@ -371,55 +367,6 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info)
}
void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
if (exception->is_valid()) {
// preserve exception
// note: the monitor_exit runtime call is a leaf routine
// and cannot block => no GC can happen
// The slow case (MonitorAccessStub) uses the first two stack slots
// ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
__ movptr (Address(rsp, 2*wordSize), exception);
}
Register obj_reg = obj_opr->as_register();
Register lock_reg = lock_opr->as_register();
// setup registers (lock_reg must be rax, for lock_object)
assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
Register hdr = lock_reg;
assert(new_hdr == SYNC_header, "wrong register");
lock_reg = new_hdr;
// compute pointer to BasicLock
Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
__ lea(lock_reg, lock_addr);
// unlock object
MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
// _slow_case_stubs->append(slow_case);
// temporary fix: must be created after exceptionhandler, therefore as call stub
_slow_case_stubs->append(slow_case);
if (UseFastLocking) {
// try inlined fast unlocking first, revert to slow locking if it fails
// note: lock_reg points to the displaced header since the displaced header offset is 0!
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
} else {
// always do slow unlocking
// note: the slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging
__ jmp(*slow_case->entry());
}
// done
__ bind(*slow_case->continuation());
if (exception->is_valid()) {
// restore exception
__ movptr (exception, Address(rsp, 2 * wordSize));
}
}
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
// if rounding, must let FrameMap know!
View file
@@ -29,8 +29,6 @@
Address::ScaleFactor array_element_size(BasicType type) const;
void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception);
void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
// helper functions which checks for overflow and sets bailout if it
View file
@@ -1465,19 +1465,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
case jvmti_exception_throw_id:
{ // rax,: exception oop
StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
// Preserve all registers across this potentially blocking call
const int num_rt_args = 2; // thread, exception oop
OopMap* map = save_live_registers(sasm, num_rt_args);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
}
break;
case dtrace_object_alloc_id:
{ // rax,: object
StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
View file
@@ -1343,6 +1343,13 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
}
break;
case _adapter_opt_profiling:
if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
__ incrementl(rcx_mh_vmcount);
}
// fall through
case _adapter_retype_only:
case _adapter_retype_raw:
// immediately jump to the next MH layer:
View file
@@ -110,6 +110,7 @@ public:
class RicochetFrame {
friend class MethodHandles;
friend class VMStructs;
private:
intptr_t* _continuation; // what to do when control gets back here
View file
@@ -346,7 +346,6 @@ void Compilation::install_code(int frame_size) {
implicit_exception_table(),
compiler(),
_env->comp_level(),
true,
has_unsafe_access()
);
}
View file
@@ -28,8 +28,10 @@
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMethodHandle.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1424,7 +1426,7 @@ void GraphBuilder::method_return(Value x) {
// See whether this is the first return; if so, store off some
// of the state for later examination
if (num_returns() == 0) {
set_inline_cleanup_info(_block, _last, state()); set_inline_cleanup_info();
}
// The current bci() is in the wrong scope, so use the bci() of
@@ -1582,6 +1584,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = Bytecodes::_invokespecial;
}
bool is_invokedynamic = code == Bytecodes::_invokedynamic;
// NEEDS_CLEANUP
// I've added the target->is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
@@ -1693,26 +1697,31 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
&& target->will_link(klass, callee_holder, code)) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
if (code == Bytecodes::_invokestatic if (code == Bytecodes::_invokestatic ||
|| code == Bytecodes::_invokespecial code == Bytecodes::_invokespecial ||
|| code == Bytecodes::_invokevirtual && target->is_final_method() code == Bytecodes::_invokevirtual && target->is_final_method() ||
) { code == Bytecodes::_invokedynamic) {
ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target;
bool success = false;
if (target->is_method_handle_invoke()) {
// method handle invokes
success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target);
}
if (!success) {
// static binding => check if callee is ok
ciMethod* inline_target = (cha_monomorphic_target != NULL) success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
? cha_monomorphic_target }
: target;
bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
CHECK_BAILOUT();
#ifndef PRODUCT
// printing
if (PrintInlining && !res) { if (PrintInlining && !success) {
// if it was successfully inlined, then it was already printed.
print_inline_result(inline_target, res); print_inline_result(inline_target, success);
}
#endif
clear_inline_bailout();
if (res) { if (success) {
// Register dependence if JVMTI has either breakpoint
// setting or hotswapping of methods capabilities since they may
// cause deoptimization.
@@ -1740,7 +1749,6 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
bool is_invokedynamic = code == Bytecodes::_invokedynamic;
ValueType* result_type = as_ValueType(target->return_type());
// We require the debug info to be the "state before" because
@@ -3038,7 +3046,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
INLINE_BAILOUT("disallowed by CompilerOracle")
} else if (!callee->can_be_compiled()) {
// callee is not compilable (prob. has breakpoints)
INLINE_BAILOUT("not compilable") INLINE_BAILOUT("not compilable (disabled)")
} else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
// intrinsics can be native or not
return true;
@@ -3397,7 +3405,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@@ -3430,7 +3438,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
} else {
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
if (callee->code_size() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large");
// don't inline throwable methods unless the inlining tree is rooted in a throwable class
if (callee->name() == ciSymbol::object_initializer_name() &&
@@ -3468,7 +3476,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// Insert null check if necessary
Value recv = NULL;
if (code() != Bytecodes::_invokestatic) { if (code() != Bytecodes::_invokestatic &&
code() != Bytecodes::_invokedynamic) {
// note: null check must happen even if first instruction of callee does
// an implicit null check since the callee is in a different scope
// and we must make sure exception handling does the right thing
@@ -3496,7 +3505,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// fall-through of control flow, all return instructions of the
// callee will need to be replaced by Goto's pointing to this
// continuation point.
BlockBegin* cont = block_at(next_bci()); BlockBegin* cont = cont_block != NULL ? cont_block : block_at(next_bci());
bool continuation_existed = true;
if (cont == NULL) {
cont = new BlockBegin(next_bci());
@@ -3608,6 +3617,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
// block merging. This allows load elimination and CSE to take place
// across multiple callee scopes if they are relatively simple, and
// is currently essential to making inlining profitable.
if (cont_block == NULL) {
if (num_returns() == 1
&& block() == orig_block
&& block() == inline_cleanup_block()) {
@@ -3631,6 +3641,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
_skip_block = true;
}
}
}
// Fill the exception handler for synchronized methods with instructions
if (callee->is_synchronized() && sync_handler->state() != NULL) {
@@ -3645,6 +3656,114 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
}
bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
assert(!callee->is_static(), "change next line");
int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1);
Value receiver = state()->stack_at(index);
if (receiver->type()->is_constant()) {
ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle();
// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
method_handle->set_callee(callee);
method_handle->set_caller(method());
// Get an adapter for the MethodHandle.
ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter();
if (method_handle_adapter != NULL) {
return try_inline(method_handle_adapter, /*holder_known=*/ true);
}
} else if (receiver->as_CheckCast()) {
// Match MethodHandle.selectAlternative idiom
Phi* phi = receiver->as_CheckCast()->obj()->as_Phi();
if (phi != NULL && phi->operand_count() == 2) {
// Get the two MethodHandle inputs from the Phi.
Value op1 = phi->operand_at(0);
Value op2 = phi->operand_at(1);
ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
mh1->set_callee(callee);
mh1->set_caller(method());
mh2->set_callee(callee);
mh2->set_caller(method());
// Get adapters for the MethodHandles.
ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
if (mh1_adapter != NULL && mh2_adapter != NULL) {
set_inline_cleanup_info();
// Build the If guard
BlockBegin* one = new BlockBegin(next_bci());
BlockBegin* two = new BlockBegin(next_bci());
BlockBegin* end = new BlockBegin(next_bci());
Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
block()->set_end(iff->as_BlockEnd());
// Connect up the states
one->merge(block()->end()->state());
two->merge(block()->end()->state());
// Save the state for the second inlinee
ValueStack* state_before = copy_state_before();
// Parse first adapter
_last = _block = one;
if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}
// Parse second adapter
_last = _block = two;
_state = state_before;
if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}
connect_to_end(end);
return true;
}
}
}
return false;
}
bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) {
// Get the MethodHandle from the CallSite.
ciCallSite* call_site = stream()->get_call_site();
ciMethodHandle* method_handle = call_site->get_target();
// Set the callee to have access to the class and signature in the
// MethodHandleCompiler.
method_handle->set_callee(callee);
method_handle->set_caller(method());
// Get an adapter for the MethodHandle.
ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter();
if (method_handle_adapter != NULL) {
if (try_inline(method_handle_adapter, /*holder_known=*/ true)) {
// Add a dependence for invalidation of the optimization.
if (!call_site->is_constant_call_site()) {
dependency_recorder()->assert_call_site_target_value(call_site, method_handle);
}
return true;
}
}
return false;
}
void GraphBuilder::inline_bailout(const char* msg) {
assert(msg != NULL, "inline bailout msg must exist");
_inline_bailout_msg = msg;
View file
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -315,9 +315,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
return_prev,
return_state); }
void set_inline_cleanup_info() {
set_inline_cleanup_info(_block, _last, _state);
}
BlockBegin* inline_cleanup_block() const { return scope_data()->inline_cleanup_block(); }
Instruction* inline_cleanup_return_prev() const { return scope_data()->inline_cleanup_return_prev(); }
ValueStack* inline_cleanup_state() const { return scope_data()->inline_cleanup_state(); }
void restore_inline_cleanup_info() {
_block = inline_cleanup_block();
_last = inline_cleanup_return_prev();
_state = inline_cleanup_state();
}
void incr_num_returns() { scope_data()->incr_num_returns(); }
int num_returns() const { return scope_data()->num_returns(); }
intx max_inline_size() const { return scope_data()->max_inline_size(); }
@@ -331,9 +339,13 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
// inliners
bool try_inline( ciMethod* callee, bool holder_known);
bool try_inline_intrinsics(ciMethod* callee);
bool try_inline_full (ciMethod* callee, bool holder_known); bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
bool try_inline_jsr(int jsr_dest_bci);
// JSR 292 support
bool for_method_handle_inline(ciMethod* callee);
bool for_invokedynamic_inline(ciMethod* callee);
// helpers
void inline_bailout(const char* msg);
BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
View file
@@ -514,28 +514,17 @@ Constant::CompareResult Constant::compare(Instruction::Condition cond, Value rig
void BlockBegin::set_end(BlockEnd* end) {
assert(end != NULL, "should not reset block end to NULL");
BlockEnd* old_end = _end; if (end == _end) {
if (end == old_end) {
return;
}
// Must make the predecessors/successors match up with the clear_end();
// BlockEnd's notion.
int i, n;
if (old_end != NULL) {
// disconnect from the old end
old_end->set_begin(NULL);
// disconnect this block from it's current successors // Set the new end
for (i = 0; i < _successors.length(); i++) {
_successors.at(i)->remove_predecessor(this);
}
}
_end = end;
_successors.clear();
// Now reset successors list based on BlockEnd
n = end->number_of_sux(); for (int i = 0; i < end->number_of_sux(); i++) {
for (i = 0; i < n; i++) {
BlockBegin* sux = end->sux_at(i);
_successors.append(sux);
sux->_predecessors.append(this);
@@ -544,6 +533,22 @@ void BlockBegin::set_end(BlockEnd* end) {
}
void BlockBegin::clear_end() {
// Must make the predecessors/successors match up with the
// BlockEnd's notion.
if (_end != NULL) {
// disconnect from the old end
_end->set_begin(NULL);
// disconnect this block from it's current successors
for (int i = 0; i < _successors.length(); i++) {
_successors.at(i)->remove_predecessor(this);
}
_end = NULL;
}
}
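The split makes the edge bookkeeping reusable: set_end() now funnels through clear_end(), and the new method-handle inlining path in c1_GraphBuilder.cpp can call clear_end() alone to undo a speculatively appended If. A self-contained toy model of the invariant being maintained (illustrative types, not the HotSpot classes):

    #include <vector>
    #include <algorithm>

    struct Block {
      std::vector<Block*> preds, succs;

      void clear_end() {                 // unlink this block from its successors
        for (Block* s : succs) {
          auto& p = s->preds;
          p.erase(std::remove(p.begin(), p.end(), this), p.end());
        }
        succs.clear();
      }
      void set_end(const std::vector<Block*>& new_succs) {
        clear_end();                     // old edges must go first, as above
        succs = new_succs;
        for (Block* s : succs) s->preds.push_back(this);
      }
    };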
void BlockBegin::disconnect_edge(BlockBegin* from, BlockBegin* to) {
// disconnect any edges between from and to
#ifndef PRODUCT
View file
@@ -1601,6 +1601,7 @@ LEAF(BlockBegin, StateSplit)
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; }
void set_end(BlockEnd* end);
void clear_end();
void disconnect_from_graph();
static void disconnect_edge(BlockBegin* from, BlockBegin* to);
BlockBegin* insert_block_between(BlockBegin* sux);
View file
@@ -121,7 +121,7 @@ void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
void LIR_Assembler::check_codespace() {
CodeSection* cs = _masm->code_section();
if (cs->remaining() < (int)(1*K)) { if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
BAILOUT("CodeBuffer overflow");
}
}
View file
@@ -133,7 +133,6 @@ class LIR_Assembler: public CompilationResourceObj {
static bool is_small_constant(LIR_Opr opr);
static LIR_Opr receiverOpr();
static LIR_Opr incomingReceiverOpr();
static LIR_Opr osrBufferPointer();
// stubs
View file
@@ -2404,7 +2404,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
VMReg name = vm_reg_for_interval(interval);
map->set_oop(name); set_oop(map, name);
// Spill optimization: when the stack value is guaranteed to be always correct,
// then it must be added to the oop map even if the interval is currently in a register
@@ -2415,7 +2415,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
map->set_oop(frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs)); set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
}
}
}
@@ -2424,7 +2424,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
int locks_count = info->stack()->total_locks_size();
for (int i = 0; i < locks_count; i++) {
map->set_oop(frame_map()->monitor_object_regname(i)); set_oop(map, frame_map()->monitor_object_regname(i));
}
return map;
View file
@@ -352,6 +352,13 @@ class LinearScan : public CompilationResourceObj {
MonitorValue* location_for_monitor_index(int monitor_index);
LocationValue* location_for_name(int name, Location::Type loc_type);
void set_oop(OopMap* map, VMReg name) {
if (map->legal_vm_reg_name(name)) {
map->set_oop(name);
} else {
bailout("illegal oopMap register name");
}
}
int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
View file
@@ -375,14 +375,6 @@ JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDes
JRT_END
JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
if (JvmtiExport::can_post_on_exceptions()) {
vframeStream vfst(thread, true);
address bcp = vfst.method()->bcp_from(vfst.bci());
JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
}
JRT_END
// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee (that is possibly included in the enclosing
// method) method oop is passed as an argument. In order to do that it is embedded in the code as
View file
@@ -65,7 +65,6 @@ class StubAssembler;
stub(monitorexit_nofpu) /* optimized version that does not preserve fpu registers */ \
stub(access_field_patching) \
stub(load_klass_patching) \
stub(jvmti_exception_throw) \
stub(g1_pre_barrier_slow) \
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
@@ -141,7 +140,6 @@ class Runtime1: public AllStatic {
static void unimplemented_entry (JavaThread* thread, StubID id);
static address exception_handler_for_pc(JavaThread* thread);
static void post_jvmti_exception_throw(JavaThread* thread);
static void throw_range_check_exception(JavaThread* thread, int index);
static void throw_index_exception(JavaThread* thread, int index);
View file
@@ -278,7 +278,7 @@
product(intx, CompilationRepeat, 0, \
"Number of times to recompile method before returning result") \
\
develop(intx, NMethodSizeLimit, (32*K)*wordSize, \ develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
"Maximum size of a compiled method.") \
\
develop(bool, TraceFPUStack, false, \
View file
@@ -79,6 +79,17 @@ public:
assert(i < _limit, "out of Call Profile MorphismLimit");
return _receiver[i];
}
// Rescale the current profile based on the incoming scale
ciCallProfile rescale(double scale) {
assert(scale >= 0 && scale <= 1.0, "out of range");
ciCallProfile call = *this;
call._count = (int)(call._count * scale);
for (int i = 0; i < _morphism; i++) {
call._receiver_count[i] = (int)(call._receiver_count[i] * scale);
}
return call;
}
};
#endif // SHARE_VM_CI_CICALLPROFILE_HPP
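The arithmetic in ciCallProfile::rescale() above is plain truncating scaling; as a self-contained model (illustrative types), a profile {count: 100, receiver_count: {60, 40}} rescaled by 0.5 becomes {50, {30, 20}}, with the morphism unchanged:

    #include <vector>

    struct ProfileModel {
      int count;
      std::vector<int> receiver_count;           // one slot per receiver seen

      ProfileModel rescale(double scale) const { // mirrors ciCallProfile::rescale
        ProfileModel p = *this;
        p.count = (int)(p.count * scale);
        for (size_t i = 0; i < p.receiver_count.size(); i++)
          p.receiver_count[i] = (int)(p.receiver_count[i] * scale);
        return p;
      }
    };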
View file
@@ -46,9 +46,6 @@ private:
ciObject* _object;
} _value;
// Implementation of the print method.
void print_impl(outputStream* st);
public:
ciConstant() {
View file
@@ -884,19 +884,31 @@ bool ciEnv::system_dictionary_modification_counter_changed() {
}
// ------------------------------------------------------------------
// ciEnv::check_for_system_dictionary_modification // ciEnv::validate_compile_task_dependencies
// Check for changes to the system dictionary during compilation //
// class loads, evolution, breakpoints // Check for changes during compilation (e.g. class loads, evolution,
void ciEnv::check_for_system_dictionary_modification(ciMethod* target) { // breakpoints, call site invalidation).
void ciEnv::validate_compile_task_dependencies(ciMethod* target) {
if (failing()) return; // no need for further checks
// Dependencies must be checked when the system dictionary changes. // First, check non-klass dependencies as we might return early and
// If logging is enabled all violated dependences will be recorded in // not check klass dependencies if the system dictionary
// the log. In debug mode check dependencies even if the system // modification counter hasn't changed (see below).
// dictionary hasn't changed to verify that no invalid dependencies for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
// were inserted. Any violated dependences in this case are dumped to if (deps.is_klass_type()) continue; // skip klass dependencies
// the tty. klassOop witness = deps.check_dependency();
if (witness != NULL) {
record_failure("invalid non-klass dependency");
return;
}
}
// Klass dependencies must be checked when the system dictionary
// changes. If logging is enabled all violated dependences will be
// recorded in the log. In debug mode check dependencies even if
// the system dictionary hasn't changed to verify that no invalid
// dependencies were inserted. Any violated dependences in this
// case are dumped to the tty.
bool counter_changed = system_dictionary_modification_counter_changed();
bool test_deps = counter_changed;
DEBUG_ONLY(test_deps = true);
@@ -904,22 +916,21 @@ void ciEnv::check_for_system_dictionary_modification(ciMethod* target) {
bool print_failures = false;
DEBUG_ONLY(print_failures = !counter_changed);
bool keep_going = (print_failures || xtty != NULL);
int klass_violations = 0;
int violated = 0;
for (Dependencies::DepStream deps(dependencies()); deps.next(); ) {
if (!deps.is_klass_type()) continue; // skip non-klass dependencies
klassOop witness = deps.check_dependency();
if (witness != NULL) {
++violated; klass_violations++;
if (print_failures) deps.print_dependency(witness, /*verbose=*/ true);
}
// If there's no log and we're not sanity-checking, we're done.
if (!keep_going) break;
}
}
if (violated != 0) { if (klass_violations != 0) {
assert(counter_changed, "failed dependencies, but counter didn't change");
record_failure("concurrent class loading");
} }
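The control flow above amounts to a two-pass check; a hedged model of just the logic (illustrative types):

    #include <vector>

    struct Dep { bool is_klass_type; bool violated; };

    // Non-klass dependencies (e.g. call site targets) are always validated;
    // klass dependencies are re-checked only when the system dictionary
    // changed (and always in debug builds, as a sanity check).
    static const char* validate_deps_model(const std::vector<Dep>& deps,
                                           bool dictionary_changed, bool debug) {
      for (const Dep& d : deps)
        if (!d.is_klass_type && d.violated) return "invalid non-klass dependency";
      if (!(dictionary_changed || debug)) return nullptr;
      for (const Dep& d : deps)
        if (d.is_klass_type && d.violated) return "concurrent class loading";
      return nullptr;
    }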
@@ -938,7 +949,6 @@ void ciEnv::register_method(ciMethod* target,
ImplicitExceptionTable* inc_table,
AbstractCompiler* compiler,
int comp_level,
bool has_debug_info,
bool has_unsafe_access) {
VM_ENTRY_MARK;
nmethod* nm = NULL;
@@ -978,8 +988,8 @@
// Encode the dependencies now, so we can check them right away.
dependencies()->encode_content_bytes();
// Check for {class loads, evolution, breakpoints} during compilation // Check for {class loads, evolution, breakpoints, ...} during compilation
check_for_system_dictionary_modification(target); validate_compile_task_dependencies(target);
}
methodHandle method(THREAD, target->get_methodOop());
@@ -1033,7 +1043,6 @@
CompileBroker::handle_full_code_cache();
}
} else {
NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
nm->set_has_unsafe_access(has_unsafe_access);
// Record successful registration.
View file
@@ -247,9 +247,9 @@ private:
// Is this thread currently in the VM state?
static bool is_in_vm();
// Helper routine for determining the validity of a compilation // Helper routine for determining the validity of a compilation with
// with respect to concurrent class loading. // respect to method dependencies (e.g. concurrent class loading).
void check_for_system_dictionary_modification(ciMethod* target); void validate_compile_task_dependencies(ciMethod* target);
public:
enum {
@@ -317,8 +317,7 @@ public:
ImplicitExceptionTable* inc_table,
AbstractCompiler* compiler,
int comp_level,
bool has_debug_info = true, bool has_unsafe_access);
bool has_unsafe_access = false);
// Access to certain well known ciObjects.
View file
@@ -64,9 +64,6 @@ private:
// shared constructor code
void initialize_from(fieldDescriptor* fd);
// The implementation of the print method.
void print_impl(outputStream* st);
public:
ciFlags flags() { return _flags; }
@@ -178,7 +175,12 @@ public:
bool is_volatile () { return flags().is_volatile(); }
bool is_transient () { return flags().is_transient(); }
bool is_call_site_target() { return ((holder() == CURRENT_ENV->CallSite_klass()) && (name() == ciSymbol::target_name())); } bool is_call_site_target() {
ciInstanceKlass* callsite_klass = CURRENT_ENV->CallSite_klass();
if (callsite_klass == NULL)
return false;
return (holder()->is_subclass_of(callsite_klass) && (name() == ciSymbol::target_name()));
}
// Debugging output
void print();
View file
@@ -1016,6 +1016,34 @@ int ciMethod::highest_osr_comp_level() {
return get_methodOop()->highest_osr_comp_level();
}
// ------------------------------------------------------------------
// ciMethod::code_size_for_inlining
//
// Code size for inlining decisions.
//
// Don't fully count method handle adapters against inlining budgets:
// the metric we use here is the number of call sites in the adapter
// as they are probably the instructions which generate some code.
int ciMethod::code_size_for_inlining() {
check_is_loaded();
// Method handle adapters
if (is_method_handle_adapter()) {
// Count call sites
int call_site_count = 0;
ciBytecodeStream iter(this);
while (iter.next() != ciBytecodeStream::EOBC()) {
if (Bytecodes::is_invoke(iter.cur_bc())) {
call_site_count++;
}
}
return call_site_count;
}
// Normal method
return code_size();
}
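A self-contained model of the metric above (illustrative bytecode type): a normal method is charged its full code size, while a method handle adapter is charged only its invoke count, since the argument-shuffling bytecodes around the calls are expected to compile away.

    #include <vector>

    enum class Bc { kLoad, kStore, kInvoke, kReturn };

    static int inlining_size_model(const std::vector<Bc>& bytecodes, bool is_mh_adapter) {
      if (!is_mh_adapter) return (int)bytecodes.size();  // normal method: code size
      int call_sites = 0;
      for (Bc bc : bytecodes)
        if (bc == Bc::kInvoke) call_sites++;             // adapters: call sites only
      return call_sites;
    }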
// ------------------------------------------------------------------
// ciMethod::instructions_size
//
View file
@@ -157,6 +157,9 @@ class ciMethod : public ciObject {
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
// Code size for inlining decisions.
int code_size_for_inlining();
int comp_level();
int highest_osr_comp_level();
View file
@@ -37,7 +37,7 @@
// ciMethodHandle::get_adapter
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const { ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) {
VM_ENTRY_MARK;
Handle h(get_oop());
methodHandle callee(_callee->get_methodOop());
@@ -73,7 +73,7 @@ ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
// ciMethodHandle::get_adapter
//
// Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) {
ciMethod* result = get_adapter_impl(is_invokedynamic);
if (result) {
// Fake up the MDO maturity.
@@ -86,11 +86,22 @@ ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
}
#ifndef PRODUCT
// ------------------------------------------------------------------
// ciMethodHandle::print_impl // ciMethodHandle::print_chain_impl
//
// Implementation of the print method.
void ciMethodHandle::print_impl(outputStream* st) { void ciMethodHandle::print_chain_impl(outputStream* st) {
st->print(" type="); ASSERT_IN_VM;
get_oop()->print(); MethodHandleChain::print(get_oop());
}
// ------------------------------------------------------------------
// ciMethodHandle::print_chain
//
// Implementation of the print_chain method.
void ciMethodHandle::print_chain(outputStream* st) {
GUARDED_VM_ENTRY(print_chain_impl(st););
}
#endif
View file
@@ -37,19 +37,23 @@ private:
ciMethod* _callee;
ciMethod* _caller;
ciCallProfile _profile;
ciMethod* _method_handle_adapter;
ciMethod* _invokedynamic_adapter;
// Return an adapter for this MethodHandle.
ciMethod* get_adapter_impl(bool is_invokedynamic) const; ciMethod* get_adapter_impl(bool is_invokedynamic);
ciMethod* get_adapter( bool is_invokedynamic) const; ciMethod* get_adapter( bool is_invokedynamic);
protected:
void print_impl(outputStream* st); void print_chain_impl(outputStream* st) PRODUCT_RETURN;
public:
ciMethodHandle(instanceHandle h_i) :
ciInstance(h_i),
_callee(NULL),
_caller(NULL) _caller(NULL),
_method_handle_adapter(NULL),
_invokedynamic_adapter(NULL)
{}
// What kind of ciObject is this?
@@ -60,10 +64,22 @@ public:
void set_call_profile(ciCallProfile profile) { _profile = profile; }
// Return an adapter for a MethodHandle call.
ciMethod* get_method_handle_adapter() const { return get_adapter(false); } ciMethod* get_method_handle_adapter() {
if (_method_handle_adapter == NULL) {
_method_handle_adapter = get_adapter(false);
}
return _method_handle_adapter;
}
// Return an adapter for an invokedynamic call.
ciMethod* get_invokedynamic_adapter() const { return get_adapter(true); } ciMethod* get_invokedynamic_adapter() {
if (_invokedynamic_adapter == NULL) {
_invokedynamic_adapter = get_adapter(true);
}
return _invokedynamic_adapter;
}
void print_chain(outputStream* st = tty) PRODUCT_RETURN;
};
#endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
View file
@@ -194,16 +194,26 @@ bool ciObject::can_be_constant() {
// ciObject::should_be_constant()
bool ciObject::should_be_constant() {
if (ScavengeRootsInCode >= 2) return true; // force everybody to be a constant
if (!JavaObjectsInPerm && !is_null_object()) { if (is_null_object()) return true;
ciEnv* env = CURRENT_ENV;
if (!JavaObjectsInPerm) {
// We want Strings and Classes to be embeddable by default since
// they used to be in the perm world. Not all Strings used to be
// embeddable but there's no easy way to distinguish the interned
// from the regulars ones so just treat them all that way.
ciEnv* env = CURRENT_ENV;
if (klass() == env->String_klass() || klass() == env->Class_klass()) {
return true;
}
}
if (EnableInvokeDynamic &&
(klass()->is_subclass_of(env->MethodHandle_klass()) ||
klass()->is_subclass_of(env->CallSite_klass()))) {
assert(ScavengeRootsInCode >= 1, "must be");
// We want to treat these aggressively.
return true;
}
return handle() == NULL || is_perm();
}
View file
@@ -129,7 +129,8 @@ public:
// Return current ByteCode and increment PC to next bytecode, skipping all
// intermediate constants. Returns EOBC at end.
// Expected usage:
// while( (bc = iter.next()) != EOBC() ) { ... } // ciBytecodeStream iter(m);
// while (iter.next() != ciBytecodeStream::EOBC()) { ... }
Bytecodes::Code next() {
_bc_start = _pc; // Capture start of bc
if( _pc >= _end ) return EOBC(); // End-Of-Bytecodes
View file
@@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -2323,6 +2324,8 @@ int java_lang_invoke_BoundMethodHandle::_vmargslot_offset;
int java_lang_invoke_AdapterMethodHandle::_conversion_offset;
int java_lang_invoke_CountingMethodHandle::_vmcount_offset;
void java_lang_invoke_MethodHandle::compute_offsets() {
klassOop k = SystemDictionary::MethodHandle_klass();
if (k != NULL && EnableInvokeDynamic) {
@@ -2371,6 +2374,23 @@ void java_lang_invoke_AdapterMethodHandle::compute_offsets() {
}
}
void java_lang_invoke_CountingMethodHandle::compute_offsets() {
klassOop k = SystemDictionary::CountingMethodHandle_klass();
if (k != NULL && EnableInvokeDynamic) {
compute_offset(_vmcount_offset, k, vmSymbols::vmcount_name(), vmSymbols::int_signature(), true);
}
}
int java_lang_invoke_CountingMethodHandle::vmcount(oop mh) {
assert(is_instance(mh), "CMH only");
return mh->int_field(_vmcount_offset);
}
void java_lang_invoke_CountingMethodHandle::set_vmcount(oop mh, int count) {
assert(is_instance(mh), "CMH only");
mh->int_field_put(_vmcount_offset, count);
}
oop java_lang_invoke_MethodHandle::type(oop mh) {
return mh->obj_field(_type_offset);
}
@@ -2674,6 +2694,17 @@ void java_lang_invoke_CallSite::compute_offsets() {
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
}
// Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
// (For C2: keep this until we have throttling logic for uncommon traps.)
if (k != NULL) {
instanceKlass* ik = instanceKlass::cast(k);
methodOop m_normal = ik->lookup_method(vmSymbols::setTargetNormal_name(), vmSymbols::setTarget_signature());
methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
guarantee(m_normal && m_volatile, "must exist");
m_normal->set_not_compilable_quietly();
m_volatile->set_not_compilable_quietly();
}
}
oop java_lang_invoke_CallSite::target(oop site) {
@@ -3031,6 +3062,7 @@ void JavaClasses::compute_offsets() {
java_lang_invoke_MethodType::compute_offsets();
java_lang_invoke_MethodTypeForm::compute_offsets();
java_lang_invoke_CallSite::compute_offsets();
java_lang_invoke_CountingMethodHandle::compute_offsets();
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
View file
@@ -981,6 +981,34 @@ class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodH
};
// A simple class that maintains an invocation count
class java_lang_invoke_CountingMethodHandle: public java_lang_invoke_MethodHandle {
friend class JavaClasses;
private:
static int _vmcount_offset;
static void compute_offsets();
public:
// Accessors
static int vmcount(oop mh);
static void set_vmcount(oop mh, int count);
// Testers
static bool is_subclass(klassOop klass) {
return SystemDictionary::CountingMethodHandle_klass() != NULL &&
Klass::cast(klass)->is_subclass_of(SystemDictionary::CountingMethodHandle_klass());
}
static bool is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
// Accessors for code generation:
static int vmcount_offset_in_bytes() { return _vmcount_offset; }
};
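The accessor pair follows the usual javaClasses.hpp pattern: a field offset computed once in compute_offsets(), then raw reads and writes at that byte offset. A toy model of the access (hypothetical helpers; memcpy stands in for oopDesc::int_field / int_field_put):

    #include <cstring>

    static int load_int_field(const void* obj, int offset_in_bytes) {
      int value;
      std::memcpy(&value, (const char*)obj + offset_in_bytes, sizeof(value));
      return value;                    // models mh->int_field(_vmcount_offset)
    }

    static void store_int_field(void* obj, int offset_in_bytes, int value) {
      std::memcpy((char*)obj + offset_in_bytes, &value, sizeof(value));
    }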
// Interface to java.lang.invoke.MemberName objects
// (These are a private interface for Java code to query the class hierarchy.)
View file
@ -155,6 +155,7 @@ class SymbolPropertyTable;
template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \
template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
template(CountingMethodHandle_klass, java_lang_invoke_CountingMethodHandle, Opt) \
template(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292) \
template(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292) \
template(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292) \

View file

@ -218,6 +218,7 @@
template(returnType_name, "returnType") \
template(signature_name, "signature") \
template(slot_name, "slot") \
template(selectAlternative_name, "selectAlternative") \
\
/* Support for annotations (JDK 1.5 and above) */ \
\
@ -246,9 +247,11 @@
template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \
template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \
template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \
template(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \
@ -258,8 +261,12 @@
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
template(setTargetNormal_name, "setTargetNormal") \
template(setTargetVolatile_name, "setTargetVolatile") \
template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
\
/* common method and field names */ \
template(object_initializer_name, "<init>") \
@ -344,6 +351,7 @@
template(vmmethod_name, "vmmethod") \
template(vmtarget_name, "vmtarget") \
template(vmentry_name, "vmentry") \
template(vmcount_name, "vmcount") \
template(vmslots_name, "vmslots") \
template(vmlayout_name, "vmlayout") \
template(vmindex_name, "vmindex") \
@ -907,6 +915,8 @@
do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \
do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \
\
do_intrinsic(_selectAlternative, java_lang_invoke_MethodHandleImpl, selectAlternative_name, selectAlternative_signature, F_S) \
\
/* unboxing methods: */ \
do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
do_name( booleanValue_name, "booleanValue") \

View file

@ -113,9 +113,9 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
assert_common_1(no_finalizable_subclasses, ctxk);
}
void Dependencies::assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle) { void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) {
check_ctxk(ctxk); check_ctxk(call_site->klass());
assert_common_3(call_site_target_value, ctxk, call_site, method_handle); assert_common_2(call_site_target_value, call_site, method_handle);
}
// Helper function. If we are adding a new dep. under ctxk2,
@ -135,7 +135,7 @@ bool Dependencies::maybe_merge_ctxk(GrowableArray<ciObject*>* deps,
}
}
void Dependencies::assert_common_1(Dependencies::DepType dept, ciObject* x) { void Dependencies::assert_common_1(DepType dept, ciObject* x) {
assert(dep_args(dept) == 1, "sanity");
log_dependency(dept, x);
GrowableArray<ciObject*>* deps = _deps[dept];
@ -148,21 +148,37 @@ void Dependencies::assert_common_1(Dependencies::DepType dept, ciObject* x) {
}
}
void Dependencies::assert_common_2(Dependencies::DepType dept, void Dependencies::assert_common_2(DepType dept,
ciKlass* ctxk, ciObject* x) { ciObject* x0, ciObject* x1) {
assert(dep_context_arg(dept) == 0, "sanity");
assert(dep_args(dept) == 2, "sanity");
log_dependency(dept, ctxk, x); log_dependency(dept, x0, x1);
GrowableArray<ciObject*>* deps = _deps[dept];
// see if the same (or a similar) dep is already recorded
if (note_dep_seen(dept, x)) { bool has_ctxk = has_explicit_context_arg(dept);
if (has_ctxk) {
assert(dep_context_arg(dept) == 0, "sanity");
if (note_dep_seen(dept, x1)) {
// look in this bucket for redundant assertions
const int stride = 2;
for (int i = deps->length(); (i -= stride) >= 0; ) {
ciObject* x1 = deps->at(i+1); ciObject* y1 = deps->at(i+1);
if (x == x1) { // same subject; check the context if (x1 == y1) { // same subject; check the context
if (maybe_merge_ctxk(deps, i+0, ctxk)) { if (maybe_merge_ctxk(deps, i+0, x0->as_klass())) {
return;
}
}
}
}
} else {
assert(dep_implicit_context_arg(dept) == 0, "sanity");
if (note_dep_seen(dept, x0) && note_dep_seen(dept, x1)) {
// look in this bucket for redundant assertions
const int stride = 2;
for (int i = deps->length(); (i -= stride) >= 0; ) {
ciObject* y0 = deps->at(i+0);
ciObject* y1 = deps->at(i+1);
if (x0 == y0 && x1 == y1) {
return;
}
}
}
}
@ -170,11 +186,11 @@ void Dependencies::assert_common_2(Dependencies::DepType dept,
}
// append the assertion in the correct bucket:
deps->append(ctxk); deps->append(x0);
deps->append(x); deps->append(x1);
}
void Dependencies::assert_common_3(Dependencies::DepType dept, void Dependencies::assert_common_3(DepType dept,
ciKlass* ctxk, ciObject* x, ciObject* x2) {
assert(dep_context_arg(dept) == 0, "sanity");
assert(dep_args(dept) == 3, "sanity");
@ -361,7 +377,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
3, // unique_concrete_subtypes_2 ctxk, k1, k2
3, // unique_concrete_methods_2 ctxk, m1, m2
1, // no_finalizable_subclasses ctxk
3 // call_site_target_value ctxk, call_site, method_handle 2 // call_site_target_value call_site, method_handle
};
const char* Dependencies::dep_name(Dependencies::DepType dept) {
@ -375,10 +391,7 @@ int Dependencies::dep_args(Dependencies::DepType dept) {
}
void Dependencies::check_valid_dependency_type(DepType dept) {
for (int deptv = (int) FIRST_TYPE; deptv < (int) TYPE_LIMIT; deptv++) { guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, err_msg("invalid dependency type: %d", (int) dept));
if (dept == ((DepType) deptv)) return;
}
ShouldNotReachHere();
}
// for the sake of the compiler log, print out current dependencies:
@ -586,8 +599,7 @@ bool Dependencies::DepStream::next() {
code_byte -= ctxk_bit;
DepType dept = (DepType)code_byte;
_type = dept;
guarantee((dept - FIRST_TYPE) < (TYPE_LIMIT - FIRST_TYPE), Dependencies::check_valid_dependency_type(dept);
"bad dependency type tag");
int stride = _dep_args[dept];
assert(stride == dep_args(dept), "sanity");
int skipj = -1;
@ -615,18 +627,35 @@ oop Dependencies::DepStream::argument(int i) {
klassOop Dependencies::DepStream::context_type() {
assert(must_be_in_vm(), "raw oops here");
int ctxkj = dep_context_arg(_type); // -1 if no context arg
if (ctxkj < 0) { // Most dependencies have an explicit context type argument.
return NULL; // for example, evol_method {
} else { int ctxkj = dep_context_arg(_type); // -1 if no explicit context arg
oop k = recorded_oop_at(_xi[ctxkj]); if (ctxkj >= 0) {
oop k = argument(ctxkj);
if (k != NULL) { // context type was not compressed away
assert(k->is_klass(), "type check");
return (klassOop) k;
} else { // recompute "default" context type }
return ctxk_encoded_as_null(_type, recorded_oop_at(_xi[ctxkj+1])); // recompute "default" context type
return ctxk_encoded_as_null(_type, argument(ctxkj+1));
}
}
// Some dependencies are using the klass of the first object
// argument as implicit context type (e.g. call_site_target_value).
{
int ctxkj = dep_implicit_context_arg(_type);
if (ctxkj >= 0) {
oop k = argument(ctxkj)->klass();
assert(k->is_klass(), "type check");
return (klassOop) k;
}
}
// And some dependencies don't have a context type at all,
// e.g. evol_method.
return NULL;
}
/// Checking dependencies:
@ -1409,21 +1438,20 @@ klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, KlassD
}
klassOop Dependencies::check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) { klassOop Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
assert(call_site ->is_a(SystemDictionary::CallSite_klass()), "sanity");
assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
if (changes == NULL) {
// Validate all CallSites
if (java_lang_invoke_CallSite::target(call_site) != method_handle)
return ctxk; // assertion failed return call_site->klass(); // assertion failed
} else {
// Validate the given CallSite
if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
assert(method_handle != changes->method_handle(), "must be");
return ctxk; // assertion failed return call_site->klass(); // assertion failed
}
}
assert(java_lang_invoke_CallSite::target(call_site) == method_handle, "should still be valid");
return NULL; // assertion still valid
}
@ -1488,7 +1516,7 @@ klassOop Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange*
klassOop witness = NULL;
switch (type()) {
case call_site_target_value:
witness = check_call_site_target_value(context_type(), argument(1), argument(2), changes); witness = check_call_site_target_value(argument(0), argument(1), changes);
break;
default:
witness = NULL;

View file

@ -167,8 +167,13 @@ class Dependencies: public ResourceObj {
// handy categorizations of dependency types:
all_types = ((1 << TYPE_LIMIT) - 1) & ((-1) << FIRST_TYPE),
non_klass_types = (1 << call_site_target_value),
klass_types = all_types & ~non_klass_types,
non_ctxk_types = (1 << evol_method),
ctxk_types = all_types & ~non_ctxk_types, implicit_ctxk_types = (1 << call_site_target_value),
explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),
max_arg_count = 3, // current maximum number of arguments (incl. ctxk)
@ -184,9 +189,15 @@ class Dependencies: public ResourceObj {
static const char* dep_name(DepType dept);
static int dep_args(DepType dept);
static int dep_context_arg(DepType dept) {
return dept_in_mask(dept, ctxk_types)? 0: -1; static bool is_klass_type( DepType dept) { return dept_in_mask(dept, klass_types ); }
}
static bool has_explicit_context_arg(DepType dept) { return dept_in_mask(dept, explicit_ctxk_types); }
static bool has_implicit_context_arg(DepType dept) { return dept_in_mask(dept, implicit_ctxk_types); }
static int dep_context_arg(DepType dept) { return has_explicit_context_arg(dept) ? 0 : -1; }
static int dep_implicit_context_arg(DepType dept) { return has_implicit_context_arg(dept) ? 0 : -1; }
static void check_valid_dependency_type(DepType dept);
private:
@ -250,8 +261,8 @@ class Dependencies: public ResourceObj {
}
void assert_common_1(DepType dept, ciObject* x);
void assert_common_2(DepType dept, ciKlass* ctxk, ciObject* x); void assert_common_2(DepType dept, ciObject* x0, ciObject* x1);
void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x, ciObject* x2); void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x1, ciObject* x2);
public:
// Adding assertions to a new dependency set at compile time:
@ -264,7 +275,7 @@ class Dependencies: public ResourceObj {
void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
void assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle); void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle);
// Define whether a given method or type is concrete.
// These methods define the term "concrete" as used in this module.
@ -318,7 +329,7 @@ class Dependencies: public ResourceObj {
static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
KlassDepChange* changes = NULL);
static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
static klassOop check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL); static klassOop check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned klassOop is NULL if the dependency assertion is still
// valid. A non-NULL klassOop is a 'witness' to the assertion
// failure, a point in the class hierarchy where the assertion has
@ -455,6 +466,8 @@ class Dependencies: public ResourceObj {
oop argument(int i); // => recorded_oop_at(argument_index(i))
klassOop context_type();
bool is_klass_type() { return Dependencies::is_klass_type(type()); }
methodOop method_argument(int i) {
oop x = argument(i);
assert(x->is_method(), "type");

View file

@ -451,7 +451,6 @@ void nmethod::init_defaults() {
_stack_traversal_mark = 0;
_unload_reported = false; // jvmti state
NOT_PRODUCT(_has_debug_info = false);
#ifdef ASSERT
_oops_are_stale = false;
#endif

View file

@ -191,8 +191,6 @@ class nmethod : public CodeBlob {
jbyte _scavenge_root_state;
NOT_PRODUCT(bool _has_debug_info; )
// Nmethod Flushing lock. If non-zero, then the nmethod is not removed
// and is not made into a zombie. However, once the nmethod is made into
// a zombie, it will be locked one final time if CompiledMethodUnload
@ -329,11 +327,6 @@ class nmethod : public CodeBlob {
methodOop method() const { return _method; }
AbstractCompiler* compiler() const { return _compiler; }
#ifndef PRODUCT
bool has_debug_info() const { return _has_debug_info; }
void set_has_debug_info(bool f) { _has_debug_info = false; }
#endif // NOT PRODUCT
// type info
bool is_nmethod() const { return true; }
bool is_java_method() const { return !method()->is_native(); }

View file

@ -30,11 +30,10 @@
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) { PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
_pc_offset = pc_offset;
_scope_decode_offset = scope_decode_offset;
_obj_decode_offset = obj_decode_offset;
_flags.word = 0; _flags = 0;
}
address PcDesc::real_pc(const nmethod* code) const {
@ -44,7 +43,7 @@ address PcDesc::real_pc(const nmethod* code) const {
void PcDesc::print(nmethod* code) {
#ifndef PRODUCT
ResourceMark rm;
tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits); tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags);
if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
return;

View file

@ -39,15 +39,17 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
int _scope_decode_offset; // offset for scope in nmethod
int _obj_decode_offset;
union PcDescFlags { enum {
int word; PCDESC_reexecute = 1 << 0,
struct { PCDESC_is_method_handle_invoke = 1 << 1,
unsigned int reexecute: 1; PCDESC_return_oop = 1 << 2
unsigned int is_method_handle_invoke: 1; };
unsigned int return_oop: 1;
} bits; int _flags;
bool operator ==(const PcDescFlags& other) { return word == other.word; }
} _flags; void set_flag(int mask, bool z) {
_flags = z ? (_flags | mask) : (_flags & ~mask);
}
public:
int pc_offset() const { return _pc_offset; }
@ -69,8 +71,8 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
};
// Flags
bool should_reexecute() const { return _flags.bits.reexecute; } bool should_reexecute() const { return (_flags & PCDESC_reexecute) != 0; }
void set_should_reexecute(bool z) { _flags.bits.reexecute = z; } void set_should_reexecute(bool z) { set_flag(PCDESC_reexecute, z); }
// Does pd refer to the same information as pd?
bool is_same_info(const PcDesc* pd) {
@ -79,11 +81,11 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
_flags == pd->_flags;
}
bool is_method_handle_invoke() const { return _flags.bits.is_method_handle_invoke; } bool is_method_handle_invoke() const { return (_flags & PCDESC_is_method_handle_invoke) != 0; }
void set_is_method_handle_invoke(bool z) { _flags.bits.is_method_handle_invoke = z; } void set_is_method_handle_invoke(bool z) { set_flag(PCDESC_is_method_handle_invoke, z); }
bool return_oop() const { return _flags.bits.return_oop; } bool return_oop() const { return (_flags & PCDESC_return_oop) != 0; }
void set_return_oop(bool z) { _flags.bits.return_oop = z; } void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); }
// Returns the real pc
address real_pc(const nmethod* code) const;
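The union-of-bitfields PcDescFlags is replaced above by a plain int plus mask constants, so the old operator== becomes ordinary integer equality in is_same_info. A standalone sketch of the set/test pattern (illustration only, not HotSpot code):

#include <cassert>
enum { FLAG_REEXECUTE = 1 << 0, FLAG_RETURN_OOP = 1 << 2 };
struct Flags {
  int _flags = 0;
  void set_flag(int mask, bool z) { _flags = z ? (_flags | mask) : (_flags & ~mask); }
  bool has_flag(int mask) const   { return (_flags & mask) != 0; }
};
int main() {
  Flags f;
  f.set_flag(FLAG_REEXECUTE, true);
  assert(f.has_flag(FLAG_REEXECUTE) && !f.has_flag(FLAG_RETURN_OOP));
  f.set_flag(FLAG_REEXECUTE, false);
  assert(f._flags == 0);  // flag words now compare with plain ==
  return 0;
}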

View file

@ -157,8 +157,14 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
// ..and clear it.
Copy::zero_to_words(obj, new_tlab_size);
} else {
// ...and clear just the allocated object. // ...and zap the just-allocated object.
Copy::zero_to_words(obj, size); #ifdef ASSERT
// Skip mangling the space corresponding to the object header to
// ensure that the returned space is not considered parsable by
// any concurrent GC thread.
size_t hdr_size = oopDesc::header_size();
Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
}
thread->tlab().fill(obj, obj + size, new_tlab_size);
return obj;
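The ASSERT-only zap above deliberately starts after the header words, echoing the comment in the hunk: with the header left untouched, a concurrent GC thread never mistakes the raw block for a parsable object. A standalone sketch of the idea (illustration only; the header size and fill value are assumptions, not HotSpot code):

#include <algorithm>
#include <cstdint>
int main() {
  const int hdr_size = 2;        // assumed object header size, in words
  uintptr_t block[8] = {};       // freshly allocated words (zeroed here)
  // Zap only the body; block[0..hdr_size) stays untouched.
  std::fill(block + hdr_size, block + 8, uintptr_t(0xBAADBABE));
  return (block[0] == 0 && block[hdr_size] != 0) ? 0 : 1;
}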

View file

@ -287,7 +287,10 @@ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_no_klass_install(klass, obj, size);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); #ifndef PRODUCT
const size_t hs = oopDesc::header_size();
Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
#endif
return (oop)obj;
}

View file

@ -419,6 +419,8 @@ class Bytecodes: AllStatic {
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); }
static int compute_flags (const char* format, int more_flags = 0); // compute the flags
static int flags (int code, bool is_wide) {
assert(code == (u_char)code, "must be a byte");

View file

@ -555,7 +555,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
{
// Walk all nmethods depending on CallSite // Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site, method_handle);
}

View file

@ -1203,12 +1203,12 @@ void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
// Compute the dependent nmethods that have a reference to a
// CallSite object. We use instanceKlass::mark_dependent_nmethod
// directly instead of CodeCache::mark_for_deoptimization because we
// want dependents on the class CallSite only not all classes in the // want dependents on the call site class only not all classes in
// ContextStream. // the ContextStream.
int marked = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
instanceKlass* call_site_klass = instanceKlass::cast(SystemDictionary::CallSite_klass()); instanceKlass* call_site_klass = instanceKlass::cast(call_site->klass());
marked = call_site_klass->mark_dependent_nmethods(changes);
}
if (marked > 0) {

View file

@ -172,11 +172,6 @@ void constMethodKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert(obj->is_constMethod(), "should be constMethod");
constMethodOop cm_oop = constMethodOop(obj);
#if 0
PSParallelCompact::adjust_pointer(cm_oop->adr_method());
PSParallelCompact::adjust_pointer(cm_oop->adr_exception_table());
PSParallelCompact::adjust_pointer(cm_oop->adr_stackmap_data());
#endif
oop* const beg_oop = cm_oop->oop_block_beg();
oop* const end_oop = cm_oop->oop_block_end();
for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {

View file

@ -63,8 +63,10 @@ constantPoolCacheOop constantPoolCacheKlass::allocate(int length,
// CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
oop obj = CollectedHeap::permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj, #ifndef PRODUCT
size)); const size_t hs = oopDesc::header_size();
Universe::heap()->check_for_bad_heap_word_value(((HeapWord*) obj)+hs, size-hs);
#endif
constantPoolCacheOop cache = (constantPoolCacheOop) obj;
assert(!UseConcMarkSweepGC || obj->klass_or_null() == NULL,
"klass should be NULL here when using CMS");

View file

@ -600,6 +600,11 @@ public:
uint taken() {
return uint_at(taken_off_set);
}
void set_taken(uint cnt) {
set_uint_at(taken_off_set, cnt);
}
// Saturating counter
uint inc_taken() {
uint cnt = taken() + 1;
@ -926,6 +931,10 @@ public:
return uint_at(not_taken_off_set);
}
void set_not_taken(uint cnt) {
set_uint_at(not_taken_off_set, cnt);
}
uint inc_not_taken() {
uint cnt = not_taken() + 1;
// Did we wrap? Will compiler screw us??
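The new set_taken/set_not_taken writers sit next to saturating readers: inc_taken and inc_not_taken bump the profile counter but refuse to wrap past the maximum. A standalone sketch of that idiom (illustration only, not HotSpot code):

#include <cassert>
#include <limits>
static unsigned inc_saturating(unsigned cnt) {
  unsigned next = cnt + 1;
  return (next != 0) ? next : cnt;  // on wrap-around, stay at the maximum
}
int main() {
  assert(inc_saturating(5) == 6);
  const unsigned max = std::numeric_limits<unsigned>::max();
  assert(inc_saturating(max) == max);
  return 0;
}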

View file

@ -914,6 +914,7 @@ methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
Symbol* name,
Symbol* signature,
Handle method_type, TRAPS) {
ResourceMark rm;
methodHandle empty;
assert(holder() == SystemDictionary::MethodHandle_klass(),

View file

@ -45,7 +45,7 @@ InlineTree::InlineTree(Compile* c,
_method(callee),
_site_invoke_ratio(site_invoke_ratio),
_max_inline_level(max_inline_level),
_count_inline_bcs(method()->code_size()) _count_inline_bcs(method()->code_size_for_inlining())
{
NOT_PRODUCT(_count_inlines = 0;)
if (_caller_jvms != NULL) {
@ -107,7 +107,7 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
// positive filter: should send be inlined? returns NULL (--> yes)
// or rejection msg
int size = callee_method->code_size(); int size = callee_method->code_size_for_inlining();
// Check for too many throws (and not too huge)
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
@ -141,8 +141,22 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
assert(mha_profile, "must exist"); assert(mha_profile, "must exist");
CounterData* cd = mha_profile->as_CounterData(); CounterData* cd = mha_profile->as_CounterData();
invoke_count = cd->count(); invoke_count = cd->count();
if (invoke_count == 0) {
return "method handle not reached";
}
if (_caller_jvms != NULL && _caller_jvms->method() != NULL &&
_caller_jvms->method()->method_data() != NULL &&
!_caller_jvms->method()->method_data()->is_empty()) {
ciMethodData* mdo = _caller_jvms->method()->method_data();
ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci());
assert(mha_profile, "must exist");
CounterData* cd = mha_profile->as_CounterData();
call_site_count = cd->count();
} else {
call_site_count = invoke_count; // use the same value
}
}
assert(invoke_count != 0, "require invocation count greater than zero");
int freq = call_site_count / invoke_count;
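For a feel of the arithmetic on the last line, a worked example with assumed profile numbers (the counts are invented for illustration; the threshold the ratio is compared against, e.g. the InlineFrequencyRatio flag, is not shown in this hunk):

int main() {
  int invoke_count    = 200;                   // assumed profiled count
  int call_site_count = 800;                   // assumed profiled count
  int freq = call_site_count / invoke_count;   // integer division, as above
  return (freq == 4) ? 0 : 1;                  // this ratio drives the inlining decision
}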
@ -244,7 +258,7 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
}
// use frequency-based objections only for non-trivial methods
if (callee_method->code_size() <= MaxTrivialSize) return NULL; if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL;
// don't use counts with -Xcomp or CTW
if (UseInterpreter && !CompileTheWorld) {
@ -305,7 +319,7 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
}
// suppress a few checks for accessors and trivial methods
if (callee_method->code_size() > MaxTrivialSize) { if (callee_method->code_size_for_inlining() > MaxTrivialSize) {
// don't inline into giant methods
if (C->unique() > (uint)NodeCountInliningCutoff) {
@ -349,7 +363,7 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
}
}
int size = callee_method->code_size(); int size = callee_method->code_size_for_inlining();
if (UseOldInlining && ClipInlining
&& (int)count_inline_bcs() + size >= DesiredMethodLimit) {
@ -394,6 +408,16 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
return true;
}
//------------------------------check_can_parse--------------------------------
const char* InlineTree::check_can_parse(ciMethod* callee) {
// Certain methods cannot be parsed at all:
if ( callee->is_native()) return "native method";
if (!callee->can_be_compiled()) return "not compilable (disabled)";
if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)";
if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)";
return NULL;
}
//------------------------------print_inlining---------------------------------
// Really, the failure_msg can be a success message also.
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
@ -423,6 +447,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
int caller_bci = jvms->bci();
ciMethod *caller_method = jvms->method();
// Do some initial checks.
if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
if (PrintInlining) {
failure_msg = "failed_initial_checks";
@ -431,6 +456,13 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
return NULL;
}
// Do some parse checks.
failure_msg = check_can_parse(callee_method);
if (failure_msg != NULL) {
if (PrintInlining) print_inlining(callee_method, caller_bci, failure_msg);
return NULL;
}
// Check if inlining policy says no.
WarmCallInfo wci = *(initial_wci);
failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);

View file

@ -61,12 +61,9 @@ public:
{
_is_osr = is_osr;
_expected_uses = expected_uses;
assert(can_parse(method, is_osr), "parse must be possible"); assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
}
// Can we build either an OSR or a regular parser for this method?
static bool can_parse(ciMethod* method, int is_osr = false);
virtual bool is_parse() const { return true; }
virtual JVMState* generate(JVMState* jvms);
int is_osr() { return _is_osr; }
@ -152,7 +149,6 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
call->set_optimized_virtual(true);
if (method()->is_method_handle_invoke()) {
call->set_method_handle_invoke(true);
kit.C->set_has_method_handle_invokes(true);
}
}
kit.set_arguments_for_java_call(call);
@ -210,7 +206,6 @@ JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
call->set_optimized_virtual(true);
// Take extra care (in the presence of argument motion) not to trash the SP:
call->set_method_handle_invoke(true);
kit.C->set_has_method_handle_invokes(true);
// Pass the target MethodHandle as first argument and shift the // Pass the target MethodHandle as first argument and shift the
// other arguments. // other arguments.
@ -303,20 +298,8 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
return kit.transfer_exceptions_into_jvms(); return kit.transfer_exceptions_into_jvms();
} }
bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
// Certain methods cannot be parsed at all:
if (!m->can_be_compiled()) return false;
if (!m->has_balanced_monitors()) return false;
if (m->get_flow_analysis()->failing()) return false;
// (Methods may bail out for other reasons, after the parser is run.
// We try to avoid this, but if forced, we must return (Node*)NULL.
// The user of the CallGenerator must check for this condition.)
return true;
}
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
if (!ParseGenerator::can_parse(m)) return NULL; if (InlineTree::check_can_parse(m) != NULL) return NULL;
return new ParseGenerator(m, expected_uses);
}
@ -324,7 +307,7 @@ CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
if (!ParseGenerator::can_parse(m, true)) return NULL; if (InlineTree::check_can_parse(m) != NULL) return NULL;
float past_uses = m->interpreter_invocation_count();
float expected_uses = past_uses;
return new ParseGenerator(m, expected_uses, true);
@ -336,7 +319,7 @@ CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj
}
CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch"); assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
return new DynamicCallGenerator(m);
}
@ -715,24 +698,36 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
// Get an adapter for the MethodHandle.
ciMethod* target_method = method_handle->get_method_handle_adapter();
if (target_method != NULL) {
CallGenerator* hit_cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, 1); CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
if (hit_cg != NULL && hit_cg->is_inline()) if (cg != NULL && cg->is_inline())
return hit_cg; return cg;
}
} else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
float prob = PROB_FAIR;
Node* meth_region = method_handle->in(0);
if (meth_region->is_Region() &&
meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
meth_region->in(1)->in(0)->is_If()) {
// If this is a diamond, grab the probability of the test to drive the inlining below
prob = meth_region->in(1)->in(0)->as_If()->_prob;
if (meth_region->in(1)->is_IfTrue()) {
prob = 1 - prob;
}
}
// selectAlternative idiom merging two constant MethodHandles.
// Generate a guard so that each can be inlined. We might want to
// do more inputs at a later point but this gets the most common
// case.
CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
if (cg1 != NULL && cg2 != NULL) {
const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
ciObject* const_oop = oop_ptr->const_oop();
ciMethodHandle* mh = const_oop->as_method_handle();
return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
if (cg1 != NULL && cg2 != NULL) {
return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
}
}
return NULL;
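For a feel of the probability plumbing added above, a worked example with an assumed profile (numbers invented for illustration): the guard If was profiled at 0.8, Phi input 1 arrives through the IfTrue projection, so the probability is flipped before scaling the two inline attempts.

#include <cassert>
int main() {
  float prob = 0.8f;            // assumed: profiled probability of the guard test
  bool in1_via_if_true = true;  // assumed: Phi input 1 comes through IfTrue
  if (in1_via_if_true) prob = 1 - prob;
  float scale1 = 1.0f - prob;   // profile weight for method_handle->in(1)
  float scale2 = prob;          // profile weight for method_handle->in(2)
  assert(scale1 + scale2 == 1.0f);  // the two rescaled profiles partition the original
  return 0;
}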
@ -741,7 +736,6 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
ciMethod* caller, ciMethod* callee, ciCallProfile profile) { ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
ciMethodHandle* method_handle = call_site->get_target();
// Set the callee to have access to the class and signature in the
@ -754,13 +748,13 @@ CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JV
ciMethod* target_method = method_handle->get_invokedynamic_adapter();
if (target_method != NULL) {
Compile *C = Compile::current();
CallGenerator* hit_cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
if (hit_cg != NULL && hit_cg->is_inline()) { if (cg != NULL && cg->is_inline()) {
// Add a dependence for invalidation of the optimization.
if (call_site->is_mutable_call_site()) { if (!call_site->is_constant_call_site()) {
C->dependencies()->assert_call_site_target_value(C->env()->CallSite_klass(), call_site, method_handle); C->dependencies()->assert_call_site_target_value(call_site, method_handle);
}
return hit_cg; return cg;
}
}
return NULL;

View file

@ -817,7 +817,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
&_handler_table, &_inc_table,
compiler,
env()->comp_level(),
true, /*has_debug_info*/
has_unsafe_access()
);
}

View file

@ -496,14 +496,6 @@ public:
virtual bool depends_only_on_test() const { return false; }
};
//------------------------------MemMoveNode------------------------------------
// Memory to memory move. Inserted very late, after allocation.
class MemMoveNode : public Node {
public:
MemMoveNode( Node *dst, Node *src ) : Node(0,dst,src) {}
virtual int Opcode() const;
};
//------------------------------ThreadLocalNode--------------------------------
// Ideal Node which returns the base of ThreadLocalStorage.
class ThreadLocalNode : public Node {

View file

@ -136,16 +136,10 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
ciCallSite* call_site = str.get_call_site();
// Inline constant and mutable call sites. We don't inline
// volatile call sites optimistically since they are specified
// to change their value often and that would result in a lot of
// deoptimizations and recompiles.
if (call_site->is_constant_call_site() || call_site->is_mutable_call_site()) {
CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
if (cg != NULL) {
return cg;
}
}
// If something failed, generate a normal dynamic call.
return CallGenerator::for_dynamic_call(call_method);
}
Some files were not shown because too many files have changed in this diff Show more