Abhijit Saha 2012-09-07 18:18:55 -07:00
commit 38b305878c
2035 changed files with 444475 additions and 31230 deletions


@@ -172,3 +172,8 @@ b820143a6f1ce993c6e6f31db4d64de990f42654 jdk8-b47
 086271e35b0a419b38e8bda9bebd70693811df0a jdk8-b48
 cecd7026f30cbd83b0601925a7a5e059aec98138 jdk8-b49
 38fe5ab028908cf64dd73a43336ba3211577bfc3 jdk8-b50
+382651d28f2502d371eca751962232c0e535e57a jdk8-b51
+b67041a6cb508da18d2f5c7687e6a31e08bea4fc jdk8-b52
+c7aa5cca1c01689a7b1a92411daf83684af05a33 jdk8-b53
+7c6aa31ff1b2ae48c1c686ebe1aadf0c3da5be15 jdk8-b54
+319f583f66db47395fa86127dd3ddb729eb7c64f jdk8-b55


@@ -172,3 +172,8 @@ e4f81a817447c3a4f6868f083c81c2fb1b15d44c jdk8-b44
 3f6c72d1c2a6e5c9e7d81c3dc984886678a128ad jdk8-b48
 c97b99424815c43818e3cc3ffcdd1a60f3198b52 jdk8-b49
 2fd67618b9a3c847780ed7b9d228e862b6e2824c jdk8-b50
+57c0aee7309050b9d6cfcbd202dc704e9260b377 jdk8-b51
+8d24def5ceb3b8f2e857f2e18b2804fc59eecf8d jdk8-b52
+febd7ff5280067ca482faaeb9418ae88764c1a35 jdk8-b53
+c1a277c6022affbc6855bdfb039511e73fbe2395 jdk8-b54
+b85b44cced2406792cfb9baab1377ff03e7001d8 jdk8-b55


@@ -172,3 +172,8 @@ cd879aff5d3cc1f58829aab3116880aa19525b78 jdk8-b43
 7e2b179a5b4dbd3f097e28daa00abfcc72ba3e0b jdk8-b48
 fe44e58a6bdbeae350ce96aafb49770a5dca5d8a jdk8-b49
 d20d9eb9f093adbf392918c703960ad24c93a331 jdk8-b50
+9b0f841ca9f7ee9bacf16a5ab41c4f829276bc6b jdk8-b51
+80689ff9cb499837513f18a1136dac7f0686cd55 jdk8-b52
+63aeb7a2472fb299134ad7388e0a111a5340b02d jdk8-b53
+16c82fc74695bab9b9e0fb05c086a5a08ba0082f jdk8-b54
+e8a0e84383d6fbd303ce44bd355fb25972b13286 jdk8-b55


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,10 @@
 package com.sun.corba.se.impl.transport;
 
-import java.util.Hashtable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
 
 import org.omg.CORBA.CompletionStatus;
 import org.omg.CORBA.SystemException;
@@ -68,7 +71,7 @@ public class CorbaResponseWaitingRoomImpl
     private CorbaConnection connection;
 
     // Maps requestId to an OutCallDesc.
-    private Hashtable out_calls = null; // REVISIT - use int hastable/map
+    final private Map<Integer, OutCallDesc> out_calls;
 
     public CorbaResponseWaitingRoomImpl(ORB orb, CorbaConnection connection)
     {
@@ -76,7 +79,8 @@ public class CorbaResponseWaitingRoomImpl
         wrapper = ORBUtilSystemException.get( orb,
             CORBALogDomains.RPC_TRANSPORT ) ;
         this.connection = connection;
-        out_calls = new Hashtable();
+        out_calls =
+            Collections.synchronizedMap(new HashMap<Integer, OutCallDesc>());
     }
 
 ////////////////////////////////////////////////////
@@ -139,7 +143,7 @@ public class CorbaResponseWaitingRoomImpl
             return null;
         }
 
-        OutCallDesc call = (OutCallDesc)out_calls.get(requestId);
+        OutCallDesc call = out_calls.get(requestId);
         if (call == null) {
             throw wrapper.nullOutCall(CompletionStatus.COMPLETED_MAYBE);
         }
@@ -197,7 +201,7 @@ public class CorbaResponseWaitingRoomImpl
         LocateReplyOrReplyMessage header = (LocateReplyOrReplyMessage)
             inputObject.getMessageHeader();
         Integer requestId = new Integer(header.getRequestId());
-        OutCallDesc call = (OutCallDesc) out_calls.get(requestId);
+        OutCallDesc call = out_calls.get(requestId);
 
         if (orb.transportDebugFlag) {
             dprint(".responseReceived: id/"
@@ -248,7 +252,6 @@ public class CorbaResponseWaitingRoomImpl
     public int numberRegistered()
     {
-        // Note: Hashtable.size() is not synchronized
         return out_calls.size();
     }
@@ -264,29 +267,41 @@ public class CorbaResponseWaitingRoomImpl
             dprint(".signalExceptionToAllWaiters: " + systemException);
         }
 
-        OutCallDesc call;
-        java.util.Enumeration e = out_calls.elements();
-        while(e.hasMoreElements()) {
-            call = (OutCallDesc) e.nextElement();
-
-            synchronized(call.done){
-                // anything waiting for BufferManagerRead's fragment queue
-                // needs to be cancelled
-                CorbaMessageMediator corbaMsgMediator =
-                    (CorbaMessageMediator)call.messageMediator;
-                CDRInputObject inputObject =
-                    (CDRInputObject)corbaMsgMediator.getInputObject();
-                // IMPORTANT: If inputObject is null, then no need to tell
-                // BufferManagerRead to cancel request processing.
-                if (inputObject != null) {
-                    BufferManagerReadStream bufferManager =
-                        (BufferManagerReadStream)inputObject.getBufferManager();
-                    int requestId = corbaMsgMediator.getRequestId();
-                    bufferManager.cancelProcessing(requestId);
-                }
-
-                call.inputObject = null;
-                call.exception = systemException;
-                call.done.notify();
+        synchronized (out_calls) {
+            if (orb.transportDebugFlag) {
+                dprint(".signalExceptionToAllWaiters: out_calls size :" +
+                       out_calls.size());
+            }
+
+            for (OutCallDesc call : out_calls.values()) {
+                if (orb.transportDebugFlag) {
+                    dprint(".signalExceptionToAllWaiters: signaling " +
+                           call);
+                }
+                synchronized(call.done) {
+                    try {
+                        // anything waiting for BufferManagerRead's fragment queue
+                        // needs to be cancelled
+                        CorbaMessageMediator corbaMsgMediator =
+                            (CorbaMessageMediator)call.messageMediator;
+                        CDRInputObject inputObject =
+                            (CDRInputObject)corbaMsgMediator.getInputObject();
+                        // IMPORTANT: If inputObject is null, then no need to tell
+                        // BufferManagerRead to cancel request processing.
+                        if (inputObject != null) {
+                            BufferManagerReadStream bufferManager =
+                                (BufferManagerReadStream)inputObject.getBufferManager();
+                            int requestId = corbaMsgMediator.getRequestId();
+                            bufferManager.cancelProcessing(requestId);
+                        }
+                    } catch (Exception e) {
+                    } finally {
+                        // attempt to wake up waiting threads in all cases
+                        call.inputObject = null;
+                        call.exception = systemException;
+                        call.done.notifyAll();
+                    }
+                }
             }
         }
     }
@@ -294,7 +309,7 @@ public class CorbaResponseWaitingRoomImpl
     public MessageMediator getMessageMediator(int requestId)
     {
         Integer id = new Integer(requestId);
-        OutCallDesc call = (OutCallDesc) out_calls.get(id);
+        OutCallDesc call = out_calls.get(id);
         if (call == null) {
             // This can happen when getting early reply fragments for a
             // request which has completed (e.g., client marshaling error).

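The CorbaResponseWaitingRoomImpl diff above swaps a legacy Hashtable for a generics-typed Collections.synchronizedMap, and the rewritten signalExceptionToAllWaiters shows the one rule that wrapper does not cover: iteration must still hold the map's own monitor. A minimal, self-contained sketch of the same pattern (class and field names here are illustrative, not taken from the JDK sources):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SynchronizedMapDemo {
    // Wrapped map: individual get/put calls are thread-safe,
    // but iteration is not.
    private final Map<Integer, String> calls =
        Collections.synchronizedMap(new HashMap<Integer, String>());

    public void register(int id, String desc) {
        calls.put(id, desc);          // no external lock needed
    }

    public void signalAll() {
        // Iteration MUST hold the map's monitor, or a concurrent put()
        // can cause a ConcurrentModificationException mid-loop.
        synchronized (calls) {
            for (String desc : calls.values()) {
                System.out.println("signaling " + desc);
            }
        }
    }

    public static void main(String[] args) {
        SynchronizedMapDemo demo = new SynchronizedMapDemo();
        demo.register(1, "call-1");
        demo.register(2, "call-2");
        demo.signalAll();
    }
}

The switch from notify() to notifyAll() in the same hunk fits the same theme: presumably more than one thread can block on a single OutCallDesc, and the "attempt to wake up waiting threads in all cases" comment plus the finally block make sure none of them is stranded if cancellation throws.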

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1521,7 +1521,7 @@ public class SocketOrChannelConnectionImpl
             // connection and give them the SystemException;
 
             responseWaitingRoom.signalExceptionToAllWaiters(systemException);
-
+        } finally {
             if (contactInfo != null) {
                 ((OutboundConnectionCache)getConnectionCache()).remove(contactInfo);
             } else if (acceptor != null) {
@@ -1542,7 +1542,6 @@ public class SocketOrChannelConnectionImpl
             writeUnlock();
-        } finally {
 
             if (orb.transportDebugFlag) {
                 dprint(".purgeCalls<-: "
                     + minor_code + "/" + die + "/" + lockHeld

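The two SocketOrChannelConnectionImpl hunks appear to pull the finally boundary forward in purgeCalls, so that removing the dead connection from the cache runs even if signalling the waiters fails. A compact sketch of that shape; WaitingRoom and Cache are hypothetical stand-ins invented for illustration, not CORBA types:

public class PurgeSketch {
    interface WaitingRoom { void signalExceptionToAllWaiters(RuntimeException e); }
    interface Cache { void remove(Object key); }

    static void purgeCalls(WaitingRoom room, Cache cache, Object key,
                           RuntimeException cause) {
        try {
            // May throw; waiters are woken with the failure cause.
            room.signalExceptionToAllWaiters(cause);
        } finally {
            // Runs even if signalling failed, so the dead connection
            // never lingers in the cache and gets reused.
            cache.remove(key);
        }
    }

    public static void main(String[] args) {
        purgeCalls(e -> System.out.println("signal: " + e.getMessage()),
                   k -> System.out.println("removed " + k),
                   "connection-1", new RuntimeException("connection died"));
    }
}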

@@ -268,3 +268,10 @@ e3619706a7253540a2d94e9e841acaab8ace7038 jdk8-b49
 58f237a9e83af6ded0d2e2c81d252cd47c0f4c45 jdk8-b50
 3b3ad16429701b2eb6712851c2f7c5a726eb2cbe hs24-b19
 663fc23da8d51c4c0552cbcb17ffc85f5869d4fd jdk8-b51
+4c8f2a12e757e7a808aa85827573e09f75d7459f hs24-b20
+6d0436885201db3f581523344a734793bb989549 jdk8-b52
+54240c1b8e87758f28da2c6a569a926fd9e0910a jdk8-b53
+9e3ae661284dc04185b029d85440fe7811f1ed07 hs24-b21
+e8fb566b94667f88462164defa654203f0ab6820 jdk8-b54
+09ea7e0752b306b8ae74713aeb4eb6263e1c6836 hs24-b22
+af0c8a0808516317333dcf9af15567cdd52761ce jdk8-b55


@@ -26,7 +26,7 @@
 # This file sets common environment variables for all SA scripts
 
 OS=`uname`
-STARTDIR=`dirname $0`
+STARTDIR=`(cd \`dirname $0 \`; pwd)`
 ARCH=`uname -m`
 
 if [ "x$SA_JAVA" = "x" ]; then


@@ -25,10 +25,11 @@
 . `dirname $0`/saenv.sh
 
-if [ -f $STARTDIR/sa.jar ] ; then
-  CP=$STARTDIR/sa.jar
+if [ -f $STARTDIR/../lib/sa-jdi.jar ] ; then
+  CP=$STARTDIR/../lib/sa-jdi.jar
 else
   CP=$STARTDIR/../build/classes
 fi
 
-$SA_JAVA -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file:/$CP -Djava.security.policy=$STARTDIR\/grantAll.policy sun.jvm.hotspot.DebugServer $*
+$STARTDIR/java -classpath $CP ${OPTIONS} -Djava.rmi.server.codebase=file://$CP -Djava.security.policy=${STARTDIR}/grantAll.policy sun.jvm.hotspot.DebugServer $*


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,11 +55,11 @@ static jmethodID listAdd_ID = 0;
 #define THROW_NEW_DEBUGGER_EXCEPTION_(str, value) { throw_new_debugger_exception(env, str); return value; }
 #define THROW_NEW_DEBUGGER_EXCEPTION(str) { throw_new_debugger_exception(env, str); return;}
 
-static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
+void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
   (*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
 }
 
-static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
+struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
   jlong ptr = (*env)->GetLongField(env, this_obj, p_ps_prochandle_ID);
   return (struct ps_prochandle*)(intptr_t)ptr;
 }
@@ -280,6 +280,7 @@ JNIEXPORT jbyteArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
   return (err == PS_OK)? array : 0;
 }
 
+#if defined(i386) || defined(ia64) || defined(amd64) || defined(sparc) || defined(sparcv9)
 JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
   (JNIEnv *env, jobject this_obj, jint lwp_id) {
@@ -410,3 +411,4 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
   (*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT);
   return array;
 }
+#endif


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,15 @@
 #ifndef _LIBPROC_H_
 #define _LIBPROC_H_
 
+#include <jni.h>
 #include <unistd.h>
 #include <stdint.h>
 #include "proc_service.h"
 
+#if defined(arm) || defined(ppc)
+#include "libproc_md.h"
+#endif
+
 #if defined(sparc) || defined(sparcv9)
 /*
   If _LP64 is defined ptrace.h should be taken from /usr/include/asm-sparc64
@@ -139,4 +144,8 @@ uintptr_t lookup_symbol(struct ps_prochandle* ph, const char* object_name,
 // address->nearest symbol lookup. return NULL for no symbol
 const char* symbol_for_pc(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* poffset);
 
+struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj);
+
+void throw_new_debugger_exception(JNIEnv* env, const char* errMsg);
+
 #endif //__LIBPROC_H_


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -549,7 +549,13 @@ public class HotSpotAgent {
                 machDesc = new MachineDescriptionSPARC32Bit();
             }
         } else {
-            throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
+            try {
+                machDesc = (MachineDescription)
+                    Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
+                                  cpu.toUpperCase()).newInstance();
+            } catch (Exception e) {
+                throw new DebuggerException("Linux not supported on machine type " + cpu);
+            }
         }
 
         LinuxDebuggerLocal dbg =

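HotSpotAgent (and BugSpotAgent just below) stop hard-coding the supported CPU list and instead derive a class name from the cpu string, so a new port only has to supply a class following the naming convention. A runnable sketch of the same convention-based lookup; every name in it is hypothetical, invented for the example:

public class ReflectiveLookupSketch {
    // Hypothetical marker type standing in for MachineDescription.
    interface MachineDescription { int wordSize(); }

    // A port registers itself simply by existing under the expected name.
    public static class MachineDescriptionARM implements MachineDescription {
        public int wordSize() { return 4; }
    }

    static MachineDescription forCpu(String cpu) {
        try {
            // Same trick as the diff: fixed prefix + upper-cased CPU name.
            return (MachineDescription)
                Class.forName("ReflectiveLookupSketch$MachineDescription" +
                              cpu.toUpperCase()).newInstance();
        } catch (Exception e) {
            throw new RuntimeException("machine type " + cpu + " not supported");
        }
    }

    public static void main(String[] args) {
        System.out.println(forCpu("arm").wordSize());   // prints 4
    }
}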

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -737,9 +737,16 @@ public class BugSpotAgent {
                 machDesc = new MachineDescriptionSPARC32Bit();
             }
         } else {
-            throw new DebuggerException("Linux only supported on x86/ia64/amd64/sparc/sparc64");
+            try {
+                machDesc = (MachineDescription)
+                    Class.forName("sun.jvm.hotspot.debugger.MachineDescription" +
+                                  cpu.toUpperCase()).newInstance();
+            } catch (Exception e) {
+                throw new DebuggerException("unsupported machine type");
+            }
         }
 
         // Note we do not use a cache for the local debugger in server
         // mode; it will be taken care of on the client side (once remote
         // debugging is implemented).


@@ -93,7 +93,6 @@ public class CodeBlob extends VMObject {
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
-  public boolean isRicochetBlob()       { return false; }
   public boolean isAdapterBlob()        { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()


@@ -57,7 +57,6 @@ public class CodeCache {
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
-    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
     virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
@@ -127,10 +126,6 @@ public class CodeCache {
       Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)),
                   "found wrong CodeBlob");
     }
-    if (result.isRicochetBlob()) {
-      // This should probably be done for other SingletonBlobs
-      return VM.getVM().ricochetBlob();
-    }
     return result;
   }


@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.code;
-
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** RicochetBlob (currently only used by Compiler 2) */
-
-public class RicochetBlob extends SingletonBlob {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static void initialize(TypeDataBase db) {
-    Type type = db.lookupType("RicochetBlob");
-
-    bounceOffsetField    = type.getCIntegerField("_bounce_offset");
-    exceptionOffsetField = type.getCIntegerField("_exception_offset");
-  }
-
-  private static CIntegerField bounceOffsetField;
-  private static CIntegerField exceptionOffsetField;
-
-  public RicochetBlob(Address addr) {
-    super(addr);
-  }
-
-  public boolean isRicochetBlob() {
-    return true;
-  }
-
-  public Address bounceAddr() {
-    return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr));
-  }
-
-  public boolean returnsToBounceAddr(Address pc) {
-    Address bouncePc = bounceAddr();
-    return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc));
-  }
-}


@@ -24,6 +24,8 @@
 package sun.jvm.hotspot.debugger;
 
+import sun.jvm.hotspot.debugger.cdbg.*;
+
 /** This is a placeholder interface for a thread's context, containing
     only integer registers (no floating-point ones). What it contains
     is platform-dependent. Not all registers are guaranteed to be
@@ -54,4 +56,6 @@ public interface ThreadContext {
   /** Set the value of the specified register (0..getNumRegisters() -
       1) as an Address */
   public void setRegisterAsAddress(int index, Address value);
+
+  public CFrame getTopFrame(Debugger dbg);
 }


@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.debugger.amd64;
 
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
 
 /** Specifies the thread context on amd64 platforms; only a sub-portion
  * of the context is guaranteed to be present on all operating
@@ -98,6 +99,10 @@ public abstract class AMD64ThreadContext implements ThreadContext {
         return data[index];
     }
 
+    public CFrame getTopFrame(Debugger dbg) {
+        return null;
+    }
+
     /** This can't be implemented in this class since we would have to
      * tie the implementation to, for example, the debugging system */
     public abstract void setRegisterAsAddress(int index, Address value);


@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.debugger.ia64;
 
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
 
 /** Specifies the thread context on ia64 platform; only a sub-portion
     of the context is guaranteed to be present on all operating
@@ -172,6 +173,10 @@ public abstract class IA64ThreadContext implements ThreadContext {
     return data[index];
   }
 
+  public CFrame getTopFrame(Debugger dbg) {
+    return null;
+  }
+
   /** This can't be implemented in this class since we would have to
       tie the implementation to, for example, the debugging system */
   public abstract void setRegisterAsAddress(int index, Address value);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,9 @@ class LinuxCDebugger implements CDebugger {
        if (pc == null) return null;
        return new LinuxSPARCCFrame(dbg, sp, pc, LinuxDebuggerLocal.getAddressSize());
     } else {
-       throw new DebuggerException(cpu + " is not yet supported");
+       // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu
+       ThreadContext context = (ThreadContext) thread.getContext();
+       return context.getTopFrame(dbg);
     }
   }

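Rather than throwing for unknown CPUs, LinuxCDebugger now asks the thread's ThreadContext for a top frame, and the base contexts patched above all return null, effectively "no native frame walking here" unless a port overrides it. A small sketch of that default-plus-override shape (all names are illustrative):

public class TopFrameSketch {
    interface CFrame { String describe(); }

    // Mirrors the pattern in the diff: a base class that opts out...
    static abstract class ThreadContext {
        CFrame getTopFrame() { return null; }   // "not supported here"
    }

    // ...and a hypothetical port that knows how to build a native frame.
    static class ArmThreadContext extends ThreadContext {
        @Override CFrame getTopFrame() { return () -> "arm frame"; }
    }

    public static void main(String[] args) {
        ThreadContext generic = new ThreadContext() {};
        ThreadContext arm = new ArmThreadContext();
        System.out.println(generic.getTopFrame());       // null -> caller skips walking
        System.out.println(arm.getTopFrame().describe()); // "arm frame"
    }
}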

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 package sun.jvm.hotspot.debugger.linux;
 
+import java.lang.reflect.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.linux.amd64.*;
 import sun.jvm.hotspot.debugger.linux.ia64.*;
@@ -41,8 +42,16 @@ class LinuxThreadContextFactory {
          return new LinuxIA64ThreadContext(dbg);
       } else if (cpu.equals("sparc")) {
          return new LinuxSPARCThreadContext(dbg);
       } else {
-         throw new RuntimeException("cpu " + cpu + " is not yet supported");
+         try {
+            Class tcc = Class.forName("sun.jvm.hotspot.debugger.linux." +
+               cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
+               "ThreadContext");
+            Constructor[] ctcc = tcc.getConstructors();
+            return (ThreadContext)ctcc[0].newInstance(dbg);
+         } catch (Exception e) {
+            throw new RuntimeException("cpu " + cpu + " is not yet supported");
+         }
       }
    }
 }

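Unlike the no-argument newInstance() used for MachineDescription, LinuxThreadContextFactory has to pass the debugger into the reflectively loaded context, so it goes through getConstructors()[0]. A sketch of the same idea; asking for the exact signature via getConstructor is a slightly stricter variant, and every class name below is hypothetical:

import java.lang.reflect.Constructor;

public class FactorySketch {
    public static class Debugger { }

    // Hypothetical per-CPU context, found purely by naming convention.
    public static class LinuxARMThreadContext {
        public LinuxARMThreadContext(Debugger dbg) {
            System.out.println("context wired to " + dbg);
        }
    }

    static Object createContext(String cpu, Debugger dbg) {
        try {
            Class<?> c = Class.forName("FactorySketch$Linux" +
                                       cpu.toUpperCase() + "ThreadContext");
            // The diff grabs getConstructors()[0]; requesting the exact
            // parameter list fails fast if the port's constructor changes.
            Constructor<?> ctor = c.getConstructor(Debugger.class);
            return ctor.newInstance(dbg);
        } catch (Exception e) {
            throw new RuntimeException("cpu " + cpu + " is not yet supported");
        }
    }

    public static void main(String[] args) {
        createContext("arm", new Debugger());
    }
}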

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@ package sun.jvm.hotspot.debugger.proc;
 import java.io.*;
 import java.net.*;
 import java.util.*;
+import java.lang.reflect.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.cdbg.*;
 import sun.jvm.hotspot.debugger.proc.amd64.*;
@@ -86,7 +87,16 @@ public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
         pcRegIndex = AMD64ThreadContext.RIP;
         fpRegIndex = AMD64ThreadContext.RBP;
     } else {
+      try {
+        Class tfc = Class.forName("sun.jvm.hotspot.debugger.proc." +
+           cpu.toLowerCase() + ".Proc" + cpu.toUpperCase() +
+           "ThreadFactory");
+        Constructor[] ctfc = tfc.getConstructors();
+        threadFactory = (ProcThreadFactory)ctfc[0].newInstance(this);
+      } catch (Exception e) {
        throw new RuntimeException("Thread access for CPU architecture " + PlatformInfo.getCPU() + " not yet supported");
+        // Note: pcRegIndex and fpRegIndex do not appear to be referenced
+      }
     }
     if (useCache) {
       // Cache portion of the remote process's address space.
@@ -375,7 +385,11 @@ public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
     int pagesize = getPageSize0();
     if (pagesize == -1) {
       // return the hard coded default value.
-      pagesize = (PlatformInfo.getCPU().equals("x86"))? 4096 : 8192;
+      if (PlatformInfo.getCPU().equals("sparc") ||
+          PlatformInfo.getCPU().equals("amd64") )
+         pagesize = 8196;
+      else
+         pagesize = 4096;
     }
     return pagesize;
   }

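ProcDebuggerLocal's page-size fallback above only triggers when the native getPageSize0 call reports -1. The replacement logic is easy to mirror; note that the constant shown in the diff is literally 8196, while the usual SPARC base page size is 8192, so the value is worth double-checking before reusing it anywhere:

public class PageSizeSketch {
    // Fallback mirroring the diff's logic; -1 means "native call failed".
    // (8196 is what the hunk shows; treat it as suspect, since SPARC's
    // base page size is normally 8192.)
    static int pageSize(int fromNative, String cpu) {
        if (fromNative != -1) return fromNative;
        return (cpu.equals("sparc") || cpu.equals("amd64")) ? 8196 : 4096;
    }

    public static void main(String[] args) {
        System.out.println(pageSize(-1, "sparc")); // 8196
        System.out.println(pageSize(-1, "x86"));   // 4096
    }
}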

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@ package sun.jvm.hotspot.debugger.remote;
 import java.rmi.*;
 import java.util.*;
+import java.lang.reflect.*;
 
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.debugger.cdbg.*;
@@ -70,7 +71,18 @@ public class RemoteDebuggerClient extends DebuggerBase implements JVMDebugger {
       cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
       unalignedAccessesOkay = true;
     } else {
-      throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
+      try {
+        Class tf = Class.forName("sun.jvm.hotspot.debugger.remote." +
+          cpu.toLowerCase() + ".Remote" + cpu.toUpperCase() +
+          "ThreadFactory");
+        Constructor[] ctf = tf.getConstructors();
+        threadFactory = (RemoteThreadFactory)ctf[0].newInstance(this);
+      } catch (Exception e) {
+        throw new DebuggerException("Thread access for CPU architecture " + cpu + " not yet supported");
+      }
+      cachePageSize = 4096;
+      cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize);
+      unalignedAccessesOkay = false;
     }
 
     // Cache portion of the remote process's address space.


@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.debugger.sparc;
 
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
 
 /** Currently provides just the minimal information necessary to get
     stack traces working. FIXME: currently hardwired for v9 -- will
@@ -124,6 +125,10 @@ public abstract class SPARCThreadContext implements ThreadContext {
     return data[index];
   }
 
+  public CFrame getTopFrame(Debugger dbg) {
+    return null;
+  }
+
   /** This can't be implemented in this class since we would have to
       tie the implementation to, for example, the debugging system */
   public abstract void setRegisterAsAddress(int index, Address value);


@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.debugger.x86;
 
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.debugger.cdbg.*;
 
 /** Specifies the thread context on x86 platforms; only a sub-portion
     of the context is guaranteed to be present on all operating
@@ -109,6 +110,10 @@ public abstract class X86ThreadContext implements ThreadContext {
     return data[index];
   }
 
+  public CFrame getTopFrame(Debugger dbg) {
+    return null;
+  }
+
   /** This can't be implemented in this class since we would have to
       tie the implementation to, for example, the debugging system */
   public abstract void setRegisterAsAddress(int index, Address value);


@@ -147,12 +147,6 @@ public abstract class Frame implements Cloneable {
     }
   }
 
-  public boolean isRicochetFrame() {
-    CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC());
-    RicochetBlob rcb = VM.getVM().ricochetBlob();
-    return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC()));
-  }
-
   public boolean isCompiledFrame() {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(!VM.getVM().isCore(), "noncore builds only");
@@ -216,8 +210,7 @@ public abstract class Frame implements Cloneable {
   public Frame realSender(RegisterMap map) {
     if (!VM.getVM().isCore()) {
       Frame result = sender(map);
-      while (result.isRuntimeFrame() ||
-             result.isRicochetFrame()) {
+      while (result.isRuntimeFrame()) {
         result = result.sender(map);
       }
       return result;
@@ -631,9 +624,6 @@ public abstract class Frame implements Cloneable {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(cb != null, "sanity check");
     }
-    if (cb == VM.getVM().ricochetBlob()) {
-      oopsRicochetDo(oopVisitor, regMap);
-    }
     if (cb.getOopMaps() != null) {
       OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging());
@@ -650,10 +640,6 @@ public abstract class Frame implements Cloneable {
     // }
   }
 
-  private void oopsRicochetDo (AddressVisitor oopVisitor, RegisterMap regMap) {
-    // XXX Empty for now
-  }
-
   // FIXME: implement the above routines, plus add
   // oops_interpreted_arguments_do and oops_compiled_arguments_do
 }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,6 +91,16 @@ public class Threads {
             access = new LinuxAMD64JavaThreadPDAccess();
         } else if (cpu.equals("sparc")) {
             access = new LinuxSPARCJavaThreadPDAccess();
+        } else {
+          try {
+            access = (JavaThreadPDAccess)
+              Class.forName("sun.jvm.hotspot.runtime.linux_" +
+                cpu.toLowerCase() + ".Linux" + cpu.toUpperCase() +
+                "JavaThreadPDAccess").newInstance();
+          } catch (Exception e) {
+            throw new RuntimeException("OS/CPU combination " + os + "/" + cpu +
+                                       " not yet supported");
+          }
         }
     } else if (os.equals("bsd")) {
         if (cpu.equals("x86")) {


@@ -87,13 +87,13 @@ public class VM {
   private StubRoutines stubRoutines;
   private Bytes bytes;
 
-  private RicochetBlob ricochetBlob;
-
   /** Flags indicating whether we are attached to a core, C1, or C2 build */
   private boolean usingClientCompiler;
   private boolean usingServerCompiler;
   /** Flag indicating whether UseTLAB is turned on */
   private boolean useTLAB;
+  /** Flag indicating whether invokedynamic support is on */
+  private boolean enableInvokeDynamic;
   /** alignment constants */
   private boolean isLP64;
   private int bytesPerLong;
@@ -319,6 +319,7 @@ public class VM {
     }
     useTLAB = (db.lookupIntConstant("UseTLAB").intValue() != 0);
+    enableInvokeDynamic = (db.lookupIntConstant("EnableInvokeDynamic").intValue() != 0);
 
     if (debugger != null) {
       isLP64 = debugger.getMachineDescription().isLP64();
@@ -554,6 +555,10 @@ public class VM {
     return useTLAB;
   }
 
+  public boolean getEnableInvokeDynamic() {
+    return enableInvokeDynamic;
+  }
+
   public TypeDataBase getTypeDataBase() {
     return db;
   }
@@ -628,18 +633,6 @@ public class VM {
     return stubRoutines;
   }
 
-  public RicochetBlob ricochetBlob() {
-    if (ricochetBlob == null) {
-      Type ricochetType = db.lookupType("SharedRuntime");
-      AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob");
-      Address addr = ricochetBlobAddress.getValue();
-      if (addr != null) {
-        ricochetBlob = new RicochetBlob(addr);
-      }
-    }
-    return ricochetBlob;
-  }
-
   public VMRegImpl getVMRegImplInfo() {
     if (vmregImpl == null) {
       vmregImpl = new VMRegImpl();


@@ -571,8 +571,6 @@ public class SPARCFrame extends Frame {
     // registers callee-saved, then we will have to copy over
     // the RegisterMap update logic from the Intel code.
 
-    if (isRicochetFrame()) return senderForRicochetFrame(map);
-
     // The constructor of the sender must know whether this frame is interpreted so it can set the
     // sender's _interpreter_sp_adjustment field.
     if (VM.getVM().getInterpreter().contains(pc)) {
@@ -945,20 +943,6 @@ public class SPARCFrame extends Frame {
     }
   }
 
-  private Frame senderForRicochetFrame(SPARCRegisterMap map) {
-    if (DEBUG) {
-      System.out.println("senderForRicochetFrame");
-    }
-    //RicochetFrame* f = RicochetFrame::from_frame(fr);
-    // Cf. is_interpreted_frame path of frame::sender
-    Address youngerSP = getSP();
-    Address sp = getSenderSP();
-    map.makeIntegerRegsUnsaved();
-    map.shiftWindow(sp, youngerSP);
-    boolean thisFrameAdjustedStack = true; // I5_savedSP is live in this RF
-    return new SPARCFrame(biasSP(sp), biasSP(youngerSP), thisFrameAdjustedStack);
-  }
-
   private Frame senderForEntryFrame(RegisterMap regMap) {
     SPARCRegisterMap map = (SPARCRegisterMap) regMap;


@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.sparc;
-
-import java.util.*;
-
-import sun.jvm.hotspot.asm.sparc.SPARCRegister;
-import sun.jvm.hotspot.asm.sparc.SPARCRegisters;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class SPARCRicochetFrame {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private SPARCFrame frame;
-
-  private static void initialize(TypeDataBase db) {
-    // Type type = db.lookupType("MethodHandles::RicochetFrame");
-  }
-
-  static SPARCRicochetFrame fromFrame(SPARCFrame f) {
-    return new SPARCRicochetFrame(f);
-  }
-
-  private SPARCRicochetFrame(SPARCFrame f) {
-    frame = f;
-  }
-
-  private Address registerValue(SPARCRegister reg) {
-    return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0);
-  }
-
-  public Address savedArgsBase() {
-    return registerValue(SPARCRegisters.L4);
-  }
-  public Address exactSenderSP() {
-    return registerValue(SPARCRegisters.I5);
-  }
-  public Address senderLink() {
-    return frame.getSenderSP();
-  }
-  public Address senderPC() {
-    return frame.getSenderPC();
-  }
-  public Address extendedSenderSP() {
-    return savedArgsBase();
-  }
-}


@@ -269,7 +269,6 @@ public class X86Frame extends Frame {
     if (isEntryFrame())       return senderForEntryFrame(map);
     if (isInterpretedFrame()) return senderForInterpreterFrame(map);
-    if (isRicochetFrame())    return senderForRicochetFrame(map);
 
     if (cb == null) {
       cb = VM.getVM().getCodeCache().findBlob(getPC());
@@ -288,16 +287,6 @@ public class X86Frame extends Frame {
     return new X86Frame(getSenderSP(), getLink(), getSenderPC());
   }
 
-  private Frame senderForRicochetFrame(X86RegisterMap map) {
-    if (DEBUG) {
-      System.out.println("senderForRicochetFrame");
-    }
-    X86RicochetFrame f = X86RicochetFrame.fromFrame(this);
-    if (map.getUpdateMap())
-      updateMapWithSavedLink(map, f.senderLinkAddress());
-    return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC());
-  }
-
   private Frame senderForEntryFrame(X86RegisterMap map) {
     if (DEBUG) {
       System.out.println("senderForEntryFrame");


@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.runtime.x86;
-
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class X86RicochetFrame extends VMObject {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static void initialize(TypeDataBase db) {
-    Type type = db.lookupType("MethodHandles::RicochetFrame");
-
-    senderLinkField    = type.getAddressField("_sender_link");
-    savedArgsBaseField = type.getAddressField("_saved_args_base");
-    exactSenderSPField = type.getAddressField("_exact_sender_sp");
-    senderPCField      = type.getAddressField("_sender_pc");
-  }
-
-  private static AddressField senderLinkField;
-  private static AddressField savedArgsBaseField;
-  private static AddressField exactSenderSPField;
-  private static AddressField senderPCField;
-
-  static X86RicochetFrame fromFrame(X86Frame f) {
-    return new X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset()));
-  }
-
-  private X86RicochetFrame(Address addr) {
-    super(addr);
-  }
-
-  public Address senderLink() {
-    return senderLinkField.getValue(addr);
-  }
-  public Address senderLinkAddress() {
-    return addr.addOffsetTo(senderLinkField.getOffset());
-  }
-  public Address savedArgsBase() {
-    return savedArgsBaseField.getValue(addr);
-  }
-  public Address extendedSenderSP() {
-    return savedArgsBase();
-  }
-  public Address exactSenderSP() {
-    return exactSenderSPField.getValue(addr);
-  }
-  public Address senderPC() {
-    return senderPCField.getValue(addr);
-  }
-}


@@ -204,7 +204,13 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
       } else if (cpu.equals("ia64")) {
          cpuHelper = new IA64Helper();
       } else {
+         try {
+            cpuHelper = (CPUHelper)Class.forName("sun.jvm.hotspot.asm." +
+               cpu.toLowerCase() + "." + cpu.toUpperCase() +
+               "Helper").newInstance();
+         } catch (Exception e) {
            throw new RuntimeException("cpu '" + cpu + "' is not yet supported!");
+         }
       }
    }


@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.utilities;
+
+public interface AltPlatformInfo {
+
+  // Additional cpu types can be tested via this interface
+
+  public boolean knownCPU(String cpu);
+}


@@ -64,6 +64,13 @@ public class PlatformInfo {
   } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64")) {
     return cpu;
   } else {
+    try {
+      Class pic = Class.forName("sun.jvm.hotspot.utilities.PlatformInfoClosed");
+      AltPlatformInfo api = (AltPlatformInfo)pic.newInstance();
+      if (api.knownCPU(cpu)) {
+        return cpu;
+      }
+    } catch (Exception e) {}
     throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
   }
 }

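The new AltPlatformInfo hook lets PlatformInfo accept CPUs it does not know about: it probes for a class named sun.jvm.hotspot.utilities.PlatformInfoClosed and, if present, defers to its knownCPU check. That class is not part of this commit (it would live in the closed overlay selected by HS_ALT_MAKE below), so the following implementation is purely hypothetical, sketched against the interface added above:

package sun.jvm.hotspot.utilities;

// Hypothetical closed-source counterpart probed for by PlatformInfo.
// Only the class name and the AltPlatformInfo contract come from the diff;
// the body is invented for illustration.
public class PlatformInfoClosed implements AltPlatformInfo {
    // Accept extra CPU types the open code does not list.
    public boolean knownCPU(String cpu) {
        return "arm".equals(cpu) || "ppc".equals(cpu);
    }
}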

@@ -22,6 +22,14 @@
 #
 #
 
+ifeq ($(HS_ALT_MAKE),)
+ifneq ($(OPENJDK),true)
+  HS_ALT_MAKE=$(GAMMADIR)/make/closed
+else
+  HS_ALT_MAKE=NO_SUCH_PATH
+endif
+endif
+
 # The common definitions for hotspot builds.
 
 # Optionally include SPEC file generated by configure.
@@ -327,3 +335,4 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
 ifndef JAVASE_EMBEDDED
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
 endif


@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=19
+HS_BUILD_NUMBER=22
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8


@ -38,7 +38,7 @@ jprt.need.sibling.build=false
# This tells jprt what default release we want to build # This tells jprt what default release we want to build
jprt.hotspot.default.release=jdk7 jprt.hotspot.default.release=jdk8
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}} jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
@ -54,77 +54,77 @@ jprt.sync.push=false
# Define the Solaris platforms we want for the various releases # Define the Solaris platforms we want for the various releases
jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7} jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7}
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}} jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7} jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}} jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.i586.jdk8=solaris_i586_5.10 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
jprt.my.solaris.i586.jdk7=solaris_i586_5.10 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7} jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7}
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}} jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk8=solaris_x64_5.10 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
jprt.my.solaris.x64.jdk7=solaris_x64_5.10 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7} jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}} jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk8=linux_i586_2.6 jprt.my.linux.i586.jdk8=linux_i586_2.6
jprt.my.linux.i586.jdk7=linux_i586_2.6 jprt.my.linux.i586.jdk7=linux_i586_2.6
jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7} jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}} jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk8=linux_x64_2.6 jprt.my.linux.x64.jdk8=linux_x64_2.6
jprt.my.linux.x64.jdk7=linux_x64_2.6 jprt.my.linux.x64.jdk7=linux_x64_2.6
jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7} jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}} jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.linux.ppc.jdk8=linux_ppc_2.6 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
jprt.my.linux.ppc.jdk7=linux_ppc_2.6 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7} jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}} jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7} jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
+jprt.my.linux.ppcsflt.jdk7u8=${jprt.my.linux.ppcsflt.jdk7}
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
+jprt.my.linux.armvfp.jdk7u8=${jprt.my.linux.armvfp.jdk7}
jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
jprt.my.linux.armv6.jdk8=linux_armv6_2.6
jprt.my.linux.armv6.jdk7=linux_armv6_2.6
-jprt.my.linux.armv6.jdk7u6=${jprt.my.linux.armv6.jdk7}
+jprt.my.linux.armv6.jdk7u8=${jprt.my.linux.armv6.jdk7}
jprt.my.linux.armv6=${jprt.my.linux.armv6.${jprt.tools.default.release}}
jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
+jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
+jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_5.2
jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
+jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}

# Standard list of jprt build targets for this source tree
@@ -159,7 +159,7 @@ jprt.build.targets.all=${jprt.build.targets.standard}, \
jprt.build.targets.jdk8=${jprt.build.targets.all}
jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u6=${jprt.build.targets.all}
+jprt.build.targets.jdk7u8=${jprt.build.targets.all}
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}

# Subset lists of test targets for this source tree
@@ -452,7 +452,7 @@ jprt.test.targets.embedded= \
jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
+jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}

# The default test/Makefile targets that should be run
@@ -512,7 +512,7 @@ jprt.make.rule.test.targets.embedded = \
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
+jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}

# 7155453: Work-around to prevent popups on OSX from blocking test completion
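An aside on the block above (editorial illustration, not part of the changeset): every jprt.my.* and jprt.*.targets property resolves through a nested ${...} reference, where the inner ${jprt.tools.default.release} expands first and the outer reference then selects the release-specific value, so renaming only the *.jdk7u6 keys to *.jdk7u8 retargets the whole matrix. A minimal C++ sketch of that innermost-first substitution, with hypothetical property values:

#include <cstddef>
#include <iostream>
#include <map>
#include <string>

// Repeatedly expand the innermost ${key} reference using the property map.
std::string resolve(const std::map<std::string, std::string>& props, std::string value) {
    for (;;) {
        std::size_t open = value.rfind("${");           // innermost opener
        if (open == std::string::npos) return value;    // nothing left to expand
        std::size_t close = value.find('}', open);
        if (close == std::string::npos) return value;   // unbalanced: give up
        std::string key = value.substr(open + 2, close - open - 2);
        auto it = props.find(key);
        std::string repl = (it == props.end()) ? "" : it->second;
        value.replace(open, close - open + 1, repl);
    }
}

int main() {
    std::map<std::string, std::string> props = {
        {"jprt.tools.default.release", "jdk8"},
        {"jprt.my.linux.armv6.jdk8",   "linux_armv6_2.6"},
    };
    // The nested reference picks the jdk8-specific value.
    std::cout << resolve(props, "${jprt.my.linux.armv6.${jprt.tools.default.release}}") << "\n";
}

Running it prints linux_armv6_2.6, the jdk8-specific platform string.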

View file

@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -133,8 +133,10 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

-# Debuggable by default
-CFLAGS += -g
+ifneq ($(DEBUG_BINARIES), true)
+  # Debuggable by default (unless already done by DEBUG_BINARIES)
+  CFLAGS += -g
+endif

# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)

View file

@@ -295,6 +295,8 @@ ADD_SA_BINARIES/ia64  =
ADD_SA_BINARIES/arm  =
ADD_SA_BINARIES/zero =

+-include $(HS_ALT_MAKE)/linux/makefiles/defs.make
+
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))

View file

@@ -215,45 +215,44 @@ AOUT_FLAGS += -Xlinker -export-dynamic
#------------------------------------------------------------------------
# Debug flags

-# Use the stabs format for debugging information (this is the default
-# on gcc-2.91). It's good enough, has all the information about line
-# numbers and local variables, and libjvm_g.so is only about 16M.
-# Change this back to "-g" if you want the most expressive format.
-# (warning: that could easily inflate libjvm_g.so to 150M!)
-# Note: The Itanium gcc compiler crashes when using -gstabs.
-DEBUG_CFLAGS/ia64  = -g
-DEBUG_CFLAGS/amd64 = -g
-DEBUG_CFLAGS/arm   = -g
-DEBUG_CFLAGS/ppc   = -g
-DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-DEBUG_CFLAGS += -gstabs
-endif
-
-ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-  FASTDEBUG_CFLAGS/ia64  = -g
-  FASTDEBUG_CFLAGS/amd64 = -g
-  FASTDEBUG_CFLAGS/arm   = -g
-  FASTDEBUG_CFLAGS/ppc   = -g
-  FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-  ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-    FASTDEBUG_CFLAGS += -gstabs
-  endif
-
-  OPT_CFLAGS/ia64  = -g
-  OPT_CFLAGS/amd64 = -g
-  OPT_CFLAGS/arm   = -g
-  OPT_CFLAGS/ppc   = -g
-  OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
-  ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-    OPT_CFLAGS += -gstabs
-  endif
-endif
-
-# DEBUG_BINARIES overrides everything, use full -g debug information
+# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)
-  DEBUG_CFLAGS = -g
-  CFLAGS += $(DEBUG_CFLAGS)
+  CFLAGS += -g
+else
+  # Use the stabs format for debugging information (this is the default
+  # on gcc-2.91). It's good enough, has all the information about line
+  # numbers and local variables, and libjvm_g.so is only about 16M.
+  # Change this back to "-g" if you want the most expressive format.
+  # (warning: that could easily inflate libjvm_g.so to 150M!)
+  # Note: The Itanium gcc compiler crashes when using -gstabs.
+  DEBUG_CFLAGS/ia64  = -g
+  DEBUG_CFLAGS/amd64 = -g
+  DEBUG_CFLAGS/arm   = -g
+  DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+  ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
+    DEBUG_CFLAGS += -gstabs
+  endif
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    FASTDEBUG_CFLAGS/ia64  = -g
+    FASTDEBUG_CFLAGS/amd64 = -g
+    FASTDEBUG_CFLAGS/arm   = -g
+    FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
+      FASTDEBUG_CFLAGS += -gstabs
+    endif
+
+    OPT_CFLAGS/ia64  = -g
+    OPT_CFLAGS/amd64 = -g
+    OPT_CFLAGS/arm   = -g
+    OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
+    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
+      OPT_CFLAGS += -gstabs
+    endif
+  endif
endif

# If we are building HEADLESS, pass on to VM

View file

@@ -30,10 +30,16 @@
include $(GAMMADIR)/make/linux/makefiles/rules.make

+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make
+
AGENT_DIR = $(GAMMADIR)/agent

include $(GAMMADIR)/make/sa.files

+-include $(HS_ALT_MAKE)/linux/makefiles/sa.make
+
TOPDIR    = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@@ -52,17 +58,15 @@ SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VE
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties

# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium, PowerPC, ARM or zero.
+# also, we don't build SA on Itanium or zero.

all:
	if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
-	     -a "$(SRCARCH)" != "arm" \
-	     -a "$(SRCARCH)" != "ppc" \
	     -a "$(SRCARCH)" != "zero" ] ; then \
	   $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
	fi

-$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
+$(GENERATED)/sa-jdi.jar:: $(AGENT_FILES)
	$(QUIETLY) echo "Making $@"
	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -111,3 +115,5 @@ clean:
	rm -rf $(SA_CLASSDIR)
	rm -rf $(GENERATED)/sa-jdi.jar
	rm -rf $(AGENT_FILES_LIST)
+
+-include $(HS_ALT_MAKE)/linux/makefiles/sa-rules.make

View file

@@ -21,6 +21,8 @@
# questions.
#
#
+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make

# Rules to build serviceability agent library, used by vm.make
@@ -48,6 +50,8 @@ SASRCFILES = $(SASRCDIR)/salibelf.c \
             $(SASRCDIR)/ps_core.c \
             $(SASRCDIR)/LinuxDebuggerLocal.c

+-include $(HS_ALT_MAKE)/linux/makefiles/saproc.make
+
SAMAPFILE = $(SASRCDIR)/mapfile

DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
@@ -60,15 +64,19 @@ ifeq ($(DEBUG_BINARIES), true)
endif

# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium, PPC, ARM or zero.
+# also, we don't build SA on Itanium or zero.

ifneq ($(wildcard $(AGENT_DIR)),)
-ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
+ifneq ($(filter-out ia64 zero,$(SRCARCH)),)
  BUILDLIBSAPROC = $(LIBSAPROC)
endif
endif

+ifneq ($(ALT_SASRCDIR),)
+ALT_SAINCDIR=-I$(ALT_SASRCDIR)
+else
+ALT_SAINCDIR=
+endif
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)

$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
@@ -84,6 +92,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
	           -I$(GENERATED) \
	           -I$(BOOT_JAVA_HOME)/include \
	           -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
+	           $(ALT_SAINCDIR) \
	           $(SASRCFILES) \
	           $(SA_LFLAGS) \
	           $(SA_DEBUG_CFLAGS) \

View file

@@ -32,7 +32,7 @@ ifneq ($(OSNAME), windows)
  ifndef LP64
    PARTIAL_NONPIC=1
  endif
-  PIC_ARCH = ppc
+  PIC_ARCH = ppc arm
  ifneq ("$(filter $(PIC_ARCH),$(BUILDARCH))","")
    PARTIAL_NONPIC=0
  endif

View file

@@ -188,14 +188,22 @@ ifdef COOKED_BUILD_NUMBER
MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
endif

-NMAKE= MAKEFLAGS= MFLAGS= nmake /NOLOGO
+NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO

+ifndef SYSTEM_UNAME
+  SYSTEM_UNAME := $(shell uname)
+  export SYSTEM_UNAME
+endif
+
# Check for CYGWIN
-ifneq (,$(findstring CYGWIN,$(shell uname)))
+ifneq (,$(findstring CYGWIN,$(SYSTEM_UNAME)))
  USING_CYGWIN=true
else
  USING_CYGWIN=false
endif

+# Check for MinGW
+ifneq (,$(findstring MINGW,$(SYSTEM_UNAME)))
+  USING_MINGW=true
+endif
+
# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
VM_DEBUG=debug
@@ -208,11 +216,16 @@ ifeq ($(USING_CYGWIN), true)
  ABS_BOOTDIR     := $(subst /,\\,$(shell /bin/cygpath -m -a "$(BOOTDIR)"))
  ABS_GAMMADIR    := $(subst /,\\,$(shell /bin/cygpath -m -a "$(GAMMADIR)"))
  ABS_OS_MAKEFILE := $(shell /bin/cygpath -m -a "$(HS_MAKE_DIR)/$(OSNAME)")/build.make
-else
-  ABS_OUTPUTDIR   := $(subst /,\\,$(shell $(CD) $(OUTPUTDIR);$(PWD)))
-  ABS_BOOTDIR     := $(subst /,\\,$(shell $(CD) $(BOOTDIR);$(PWD)))
-  ABS_GAMMADIR    := $(subst /,\\,$(shell $(CD) $(GAMMADIR);$(PWD)))
-  ABS_OS_MAKEFILE := $(subst /,\\,$(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make)
+else ifeq ($(USING_MINGW), true)
+  ABS_OUTPUTDIR   := $(shell $(CD) $(OUTPUTDIR);$(PWD))
+  ABS_BOOTDIR     := $(shell $(CD) $(BOOTDIR);$(PWD))
+  ABS_GAMMADIR    := $(shell $(CD) $(GAMMADIR);$(PWD))
+  ABS_OS_MAKEFILE := $(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make
+else
+  ABS_OUTPUTDIR   := $(subst /,\\,$(shell $(CD) $(OUTPUTDIR);$(PWD)))
+  ABS_BOOTDIR     := $(subst /,\\,$(shell $(CD) $(BOOTDIR);$(PWD)))
+  ABS_GAMMADIR    := $(subst /,\\,$(shell $(CD) $(GAMMADIR);$(PWD)))
+  ABS_OS_MAKEFILE := $(subst /,\\,$(shell $(CD) $(HS_MAKE_DIR)/$(OSNAME);$(PWD))/build.make)
endif

# Disable building SA on windows until we are sure
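A side note on the SYSTEM_UNAME change above (an analogy, not JDK code): caching $(shell uname) once and exporting it spares every recursive sub-make another fork of uname. The C++ counterpart of the pattern is a function-local static that memoizes an expensive probe; query_system_name and its return value here are hypothetical stand-ins:

#include <cstdio>
#include <string>

static std::string query_system_name() {
    std::puts("(expensive probe runs here)");   // stands in for $(shell uname)
    return "CYGWIN_NT-6.1";                     // hypothetical result
}

const std::string& system_name() {
    static const std::string cached = query_system_name();  // evaluated exactly once
    return cached;
}

int main() {
    // Both calls reuse the cached value; the probe message prints only once.
    std::printf("%s\n", system_name().c_str());
    std::printf("%s\n", system_name().c_str());
}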

View file

@@ -23,14 +23,15 @@
#

# These are the commands used externally to compile and run.
+# The \ are used here for traditional Windows apps and " quoted to get
+# past the Unix-like shell:
!ifdef BootStrapDir
-RUN_JAVA=$(BootStrapDir)\bin\java
-RUN_JAVAP=$(BootStrapDir)\bin\javap
-RUN_JAVAH=$(BootStrapDir)\bin\javah
-RUN_JAR=$(BootStrapDir)\bin\jar
-COMPILE_JAVAC=$(BootStrapDir)\bin\javac $(BOOTSTRAP_JAVAC_FLAGS)
-COMPILE_RMIC=$(BootStrapDir)\bin\rmic
+RUN_JAVA="$(BootStrapDir)\bin\java"
+RUN_JAVAP="$(BootStrapDir)\bin\javap"
+RUN_JAVAH="$(BootStrapDir)\bin\javah"
+RUN_JAR="$(BootStrapDir)\bin\jar"
+COMPILE_JAVAC="$(BootStrapDir)\bin\javac" $(BOOTSTRAP_JAVAC_FLAGS)
+COMPILE_RMIC="$(BootStrapDir)\bin\rmic"
BOOT_JAVA_HOME=$(BootStrapDir)
!else
RUN_JAVA=java
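Why the quotes matter (an illustrative sketch; the path below is hypothetical): a bootstrap JDK installed under a directory containing spaces splits into two command-line tokens unless the tool path is quoted when the command is assembled. In C++ terms:

#include <iostream>
#include <string>

// Wrap an argument in double quotes if it contains whitespace.
std::string quote_if_needed(const std::string& arg) {
    if (arg.find_first_of(" \t") == std::string::npos) return arg;
    return "\"" + arg + "\"";
}

int main() {
    std::string boot_dir = "C:\\Program Files\\Java\\jdk7";   // hypothetical path with a space
    std::string cmd = quote_if_needed(boot_dir + "\\bin\\javac") + " -version";
    std::cout << cmd << "\n";  // "C:\Program Files\Java\jdk7\bin\javac" -version
}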

View file

@@ -36,37 +36,37 @@ checkAndBuildSA::
!include $(WorkSpace)/make/windows/makefiles/rules.make
!include $(WorkSpace)/make/sa.files

-GENERATED = ..\generated
+GENERATED = ../generated

# tools.jar is needed by the JDI - SA binding
-SA_CLASSPATH = $(BOOT_JAVA_HOME)\lib\tools.jar
+SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar

-SA_CLASSDIR = $(GENERATED)\saclasses
+SA_CLASSDIR = $(GENERATED)/saclasses

SA_BUILD_VERSION_PROP = sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)

-SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties
+SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties

-default::  $(GENERATED)\sa-jdi.jar
+default::  $(GENERATED)/sa-jdi.jar

# Remove the space between $(SA_BUILD_VERSION_PROP) and > below as it adds a white space
# at the end of SA version string and causes a version mismatch with the target VM version.

-$(GENERATED)\sa-jdi.jar: $(AGENT_FILES:/=\)
-	@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
-	@echo ...Building sa-jdi.jar
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
+	$(QUIETLY) mkdir -p $(SA_CLASSDIR)
+	@echo ...Building sa-jdi.jar into $(SA_CLASSDIR)
	@echo ...$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) ....
-	@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES:/=\)
+	@$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES)
	$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
	$(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
	$(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
-	$(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources
+	$(QUIETLY) mkdir $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
	$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)
	$(RUN_JAR) cf $@ -C $(SA_CLASSDIR) .
-	$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
+	$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
	$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
	$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
	$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
@@ -85,27 +85,27 @@ checkAndBuildSA:: $(SAWINDBG)
# will be useful to have the assertion checks in place

!if "$(BUILDARCH)" == "ia64"
-SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
!elseif "$(BUILDARCH)" == "amd64"
-SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN64" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
!if "$(COMPILER_NAME)" == "VS2005"
# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line,
# otherwise we get missing __security_check_cookie externals at link time.
SA_LD_FLAGS = bufferoverflowU.lib
!endif
!else
-SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
+SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 -Gm $(GX_OPTION) -Od -D "WIN32" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -GZ -c
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
-SA_CFLAGS = $(SA_CFLAGS) /ZI
+SA_CFLAGS = $(SA_CFLAGS) -ZI
!endif
!endif
!if "$(MT)" != ""
-SA_LD_FLAGS = /manifest $(SA_LD_FLAGS)
+SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
-SA_LFLAGS = $(SA_LD_FLAGS) /nologo /subsystem:console /machine:$(MACHINE)
+SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
-SA_LFLAGS = $(SA_LFLAGS) /map /debug
+SA_LFLAGS = $(SA_LFLAGS) -map -debug
!endif

# Note that we do not keep sawindbj.obj around as it would then
@@ -117,15 +117,15 @@ SA_LFLAGS = $(SA_LFLAGS) /map /debug
$(SAWINDBG): $(SASRCFILE)
	set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
	$(CXX) @<<
-	  /I"$(BootStrapDir)/include" /I"$(BootStrapDir)/include/win32"
-	  /I"$(GENERATED)" $(SA_CFLAGS)
+	  -I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32"
+	  -I"$(GENERATED)" $(SA_CFLAGS)
	  $(SASRCFILE)
-	  /out:$*.obj
+	  -out:$*.obj
<<
	set LIB=$(SA_LIB)$(LIB)
-	$(LD) /out:$@ /DLL $*.obj dbgeng.lib $(SA_LFLAGS)
+	$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
!if "$(MT)" != ""
-	$(MT) /manifest $(@F).manifest /outputresource:$(@F);#2
+	$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
!endif
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
!if "$(ZIP_DEBUGINFO_FILES)" == "1"
@@ -136,6 +136,6 @@ $(SAWINDBG): $(SASRCFILE)
	-@rm -f $*.obj

cleanall :
-	rm -rf $(GENERATED:\=/)/saclasses
-	rm -rf $(GENERATED:\=/)/sa-jdi.jar
+	rm -rf $(GENERATED)/saclasses
+	rm -rf $(GENERATED)/sa-jdi.jar
!endif

View file

@@ -36,11 +36,12 @@ CXX=cl.exe

!ifdef SUBDIRS
+# \ is used below because $(MAKE) is nmake here, which expects Windows paths
$(SUBDIRS): FORCE
	@if not exist $@ mkdir $@
-	@if not exist $@\local.make echo # Empty > $@\local.make
-	@echo nmake $(ACTION) in $(DIR)\$@
-	cd $@ && $(MAKE) /NOLOGO /f $(WorkSpace)\make\windows\makefiles\$@.make $(ACTION) DIR=$(DIR)\$@ BUILD_FLAVOR=$(BUILD_FLAVOR)
+	@if not exist $@/local.make echo # Empty > $@/local.make
+	@echo nmake $(ACTION) in $(DIR)/$@
+	cd $@ && $(MAKE) -NOLOGO -f $(WorkSpace)\make\windows\makefiles\$@.make $(ACTION) DIR=$(DIR)\$@ BUILD_FLAVOR=$(BUILD_FLAVOR)
!endif

# Creates the needed directory

View file

@@ -108,7 +108,7 @@ ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) \
        -define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"

$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
-	@$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
+	@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)

clean:
	@rm -rf $(HOTSPOTBUILDSPACE)/classes

View file

@@ -44,8 +44,10 @@

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
+#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
+#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
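An editorial note on the new debug-build STOP macro (standalone example below, not JDK source): it expands to two statements, block_comment(error); stop(error);, so an unbraced call site like if (cond) STOP(msg); would run stop unconditionally. The call sites changed later in this file are braced for exactly that reason; the conventional alternative is the do { ... } while (0) idiom:

#include <cstdio>

void block_comment(const char* s) { std::printf("comment: %s\n", s); }
void stop(const char* s)          { std::printf("stop: %s\n", s); }

// Hazardous form: expands to two statements.
#define STOP_UNSAFE(error) block_comment(error); stop(error)

// Safe form: a single statement regardless of the call site.
#define STOP_SAFE(error) do { block_comment(error); stop(error); } while (0)

int main() {
    bool failed = false;
    if (failed) { STOP_UNSAFE("braced, so both calls stay guarded"); }
    if (failed) STOP_SAFE("safe even without braces");  // expands to one statement
    return 0;
}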
@@ -992,7 +994,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
    save_frame(0);                // to avoid clobbering O0
    ld_ptr(pc_addr, L0);
    br_null_short(L0, Assembler::pt, PcOk);
-    stop("last_Java_pc not zeroed before leaving Java");
+    STOP("last_Java_pc not zeroed before leaving Java");
    bind(PcOk);

    // Verify that flags was zeroed on return to Java
@@ -1001,7 +1003,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
    tst(L0);
    br(Assembler::zero, false, Assembler::pt, FlagsOk);
    delayed() -> restore();
-    stop("flags not zeroed before leaving Java");
+    STOP("flags not zeroed before leaving Java");
    bind(FlagsOk);
#endif /* ASSERT */
  //
@@ -1021,7 +1023,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
    andcc(last_java_sp, 0x01, G0);
    br(Assembler::notZero, false, Assembler::pt, StackOk);
    delayed()->nop();
-    stop("Stack Not Biased in set_last_Java_frame");
+    STOP("Stack Not Biased in set_last_Java_frame");
    bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
@@ -1650,23 +1652,28 @@ void MacroAssembler::safepoint() {

void RegistersForDebugging::print(outputStream* s) {
+  FlagSetting fs(Debugging, true);
  int j;
-  for ( j = 0; j < 8; ++j )
-    if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
-    else          s->print_cr( "fp = 0x%.16lx",    i[j]);
+  for (j = 0; j < 8; ++j) {
+    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
+    else        { s->print( "fp = "   ); os::print_location(s, i[j]); }
+  }
  s->cr();

-  for ( j = 0; j < 8; ++j )
-    s->print_cr("l%d = 0x%.16lx", j, l[j]);
+  for (j = 0; j < 8; ++j) {
+    s->print("l%d = ", j); os::print_location(s, l[j]);
+  }
  s->cr();

-  for ( j = 0; j < 8; ++j )
-    if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
-    else          s->print_cr( "sp = 0x%.16lx",    o[j]);
+  for (j = 0; j < 8; ++j) {
+    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
+    else        { s->print( "sp = "   ); os::print_location(s, o[j]); }
+  }
  s->cr();

-  for ( j = 0; j < 8; ++j )
-    s->print_cr("g%d = 0x%.16lx", j, g[j]);
+  for (j = 0; j < 8; ++j) {
+    s->print("g%d = ", j); os::print_location(s, g[j]);
+  }
  s->cr();

  // print out floats with compression
@@ -2020,8 +2027,8 @@ void MacroAssembler::untested(const char* what) {
  char* b = new char[1024];
  sprintf(b, "untested: %s", what);

-  if ( ShowMessageBoxOnError )   stop(b);
-  else                           warn(b);
+  if (ShowMessageBoxOnError) { STOP(b); }
+  else                       { warn(b); }
}
@@ -2998,26 +3005,60 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}

+// virtual method calling
+void MacroAssembler::lookup_virtual_method(Register recv_klass,
+                                           RegisterOrConstant vtable_index,
+                                           Register method_result) {
+  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
+  Register sethi_temp = method_result;
+  const int base = (instanceKlass::vtable_start_offset() * wordSize +
+                    // method pointer offset within the vtable entry:
+                    vtableEntry::method_offset_in_bytes());
+  RegisterOrConstant vtable_offset = vtable_index;
+  // Each of the following three lines potentially generates an instruction.
+  // But the total number of address formation instructions will always be
+  // at most two, and will often be zero.  In any case, it will be optimal.
+  // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
+  // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
+  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
+  vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
+  Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
+  ld_ptr(vtable_entry_addr, method_result);
+}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Register temp2_reg,
                                         Label& L_success) {
-  Label L_failure, L_pop_to_failure;
-  check_klass_subtype_fast_path(sub_klass, super_klass,
-                                temp_reg, temp2_reg,
-                                &L_success, &L_failure, NULL);
  Register sub_2 = sub_klass;
  Register sup_2 = super_klass;
  if (!sub_2->is_global())  sub_2 = L0;
  if (!sup_2->is_global())  sup_2 = L1;
+  bool did_save = false;
+  if (temp_reg == noreg || temp2_reg == noreg) {
+    temp_reg = L2;
+    temp2_reg = L3;
+    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+    sub_klass = sub_2;
+    super_klass = sup_2;
+    did_save = true;
+  }
+  Label L_failure, L_pop_to_failure, L_pop_to_success;
+  check_klass_subtype_fast_path(sub_klass, super_klass,
+                                temp_reg, temp2_reg,
+                                (did_save ? &L_pop_to_success : &L_success),
+                                (did_save ? &L_pop_to_failure : &L_failure), NULL);

-  save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+  if (!did_save)
+    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
  check_klass_subtype_slow_path(sub_2, sup_2,
                                L2, L3, L4, L5,
                                NULL, &L_pop_to_failure);

  // on success:
+  bind(L_pop_to_success);
  restore();
  ba_short(L_success);
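The address arithmetic behind the lookup_virtual_method added above, restated as a runnable model (every constant here is an illustrative assumption, not a HotSpot value): the entry's byte offset is vtable_start plus index times entry size, plus the method-pointer offset within the entry, and the generated code folds that into at most two address-forming instructions:

#include <cstdint>
#include <cstdio>

int main() {
    const intptr_t word_size           = 8;   // LP64
    const intptr_t vtable_start_words  = 14;  // hypothetical vtable_start_offset()
    const intptr_t entry_size_words    = 1;   // hypothetical vtableEntry::size()
    const intptr_t method_offset_bytes = 0;   // hypothetical method_offset_in_bytes()

    intptr_t vtable_index = 5;
    intptr_t base   = vtable_start_words * word_size + method_offset_bytes;
    intptr_t offset = base + vtable_index * (entry_size_words * word_size);

    // With a constant index the whole expression is one immediate;
    // with a register index it is a shift plus an add, as the comment
    // in the diff explains.
    std::printf("load Method* from [recv_klass + %ld]\n", (long)offset);
}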
@@ -3234,54 +3275,6 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
}

-void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                              Register temp_reg,
-                                              Label& wrong_method_type) {
-  assert_different_registers(mtype_reg, mh_reg, temp_reg);
-  // compare method type against that of the receiver
-  RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
-  load_heap_oop(mh_reg, mhtype_offset, temp_reg);
-  cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type);
-}
-
-// A method handle has a "vmslots" field which gives the size of its
-// argument list in JVM stack slots.  This field is either located directly
-// in every method handle, or else is indirectly accessed through the
-// method handle's MethodType.  This macro hides the distinction.
-void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                                Register temp_reg) {
-  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
-  // load mh.type.form.vmslots
-  Register temp2_reg = vmslots_reg;
-  load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
-  load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
-  ld(           Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
-}
-
-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
-  assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
-  assert_different_registers(mh_reg, temp_reg);
-
-  // pick out the interpreted side of the handler
-  // NOTE: vmentry is not an oop!
-  ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
-
-  // off we go...
-  ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
-  jmp(temp_reg, 0);
-
-  // for the various stubs which take control at this point,
-  // see MethodHandles::generate_method_handle_stub
-
-  // Some callers can fill the delay slot.
-  if (emit_delayed_nop) {
-    delayed()->nop();
-  }
-}
-
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                   Register temp_reg,
                                                   int extra_slot_offset) {
@@ -3914,7 +3907,7 @@ void MacroAssembler::verify_tlab() {
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    or3(t1, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
-    stop("assert(top >= start)");
+    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
@@ -3922,13 +3915,13 @@ void MacroAssembler::verify_tlab() {
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
    or3(t3, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
-    stop("assert(top <= end)");
+    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(next2);
    and3(t3, MinObjAlignmentInBytesMask, t3);
    cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
-    stop("assert(aligned)");
+    STOP("assert(aligned)");
    should_not_reach_here();

    bind(ok);
@@ -3976,7 +3969,7 @@ void MacroAssembler::eden_allocate(
      btst(MinObjAlignmentInBytesMask, obj);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
-      stop("eden top is not properly aligned");
+      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
@@ -4013,7 +4006,7 @@ void MacroAssembler::eden_allocate(
      btst(MinObjAlignmentInBytesMask, top_addr);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
-      stop("eden top is not properly aligned");
+      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
@@ -4066,7 +4059,7 @@ void MacroAssembler::tlab_allocate(
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
-    stop("updated TLAB free is not properly aligned");
+    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT
@@ -4164,7 +4157,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
-    stop("assert(t1 == tlab_size)");
+    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);

View file

@@ -2538,6 +2538,11 @@ public:
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

+  // virtual method calling
+  void lookup_virtual_method(Register recv_klass,
+                             RegisterOrConstant vtable_index,
+                             Register method_result);
+
  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
@@ -2577,12 +2582,6 @@ public:
                           Label& L_success);

  // method handles (JSR 292)
-  void check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                Register temp_reg,
-                                Label& wrong_method_type);
-  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                  Register temp_reg);
-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);

  // offset relative to Gargs of argument at tos[arg_slot].
  // (arg_slot == 0 means the last argument, not the first).
  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
@@ -2590,7 +2589,7 @@ public:
                                     int extra_slot_offset = 0);
  // Address of Gargs and argument_offset.
  Address            argument_address(RegisterOrConstant arg_slot,
-                                      Register temp_reg,
+                                      Register temp_reg = noreg,
                                      int extra_slot_offset = 0);

  // Stack overflow checking

View file

@@ -435,85 +435,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}

-void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that offset == referent_offset.
-  //
-  // So we might have to emit:
-  //   if (src == null) goto continuation.
-  //
-  // and we definitely have to emit:
-  //   if (klass(src).reference_type == REF_NONE) goto continuation
-  //   if (!marking_active) goto continuation
-  //   if (pre_val == null) goto continuation
-  //   call pre_barrier(pre_val)
-  //   goto continuation
-  //
-  __ bind(_entry);
-
-  assert(src()->is_register(), "sanity");
-  Register src_reg = src()->as_register();
-
-  if (gen_src_check()) {
-    // The original src operand was not a constant.
-    // Generate src == null?
-    if (__ is_in_wdisp16_range(_continuation)) {
-      __ br_null(src_reg, /*annul*/false, Assembler::pt, _continuation);
-    } else {
-      __ cmp(src_reg, G0);
-      __ brx(Assembler::equal, false, Assembler::pt, _continuation);
-    }
-    __ delayed()->nop();
-  }
-
-  // Generate src->_klass->_reference_type() == REF_NONE)?
-  assert(tmp()->is_register(), "sanity");
-  Register tmp_reg = tmp()->as_register();
-
-  __ load_klass(src_reg, tmp_reg);
-
-  Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
-  __ ldub(ref_type_adr, tmp_reg);
-
-  // _reference_type field is of type ReferenceType (enum)
-  assert(REF_NONE == 0, "check this code");
-  __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
-  __ delayed()->nop();
-
-  // Is marking active?
-  assert(thread()->is_register(), "precondition");
-  Register thread_reg = thread()->as_pointer_register();
-
-  Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
-                                           PtrQueue::byte_offset_of_active()));
-
-  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
-    __ ld(in_progress, tmp_reg);
-  } else {
-    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
-    __ ldsb(in_progress, tmp_reg);
-  }
-
-  __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
-  __ delayed()->nop();
-
-  // val == null?
-  assert(val()->is_register(), "Precondition.");
-  Register val_reg = val()->as_register();
-
-  if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);
-  } else {
-    __ cmp(val_reg, G0);
-    __ brx(Assembler::equal, false, Assembler::pt, _continuation);
-  }
-  __ delayed()->nop();
-
-  __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
-  __ delayed()->mov(val_reg, G4);
-
-  __ br(Assembler::always, false, Assembler::pt, _continuation);
-  __ delayed()->nop();
-}
-
jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
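For orientation, the control flow that the removed G1UnsafeGetObjSATBBarrierStub emitted in assembly, restated as a compilable sketch (stand-in types and names, not VM code): a chain of early exits guarding the SATB pre-barrier call for an Unsafe read of a Reference.referent field:

#include <cstdio>

// Minimal stand-in types; everything here is illustrative only.
struct Klass { int reference_type; };            // REF_NONE modeled as 0
struct oop_t { Klass* klass; };

static bool satb_marking_active = false;         // per-thread flag in the real VM
static void g1_pre_barrier(oop_t* pre_val) { std::printf("enqueue %p\n", (void*)pre_val); }

// Roughly the chain of checks the stub generated.
void referent_pre_barrier(oop_t* src, oop_t* pre_val) {
    if (src == nullptr) return;                  // src == null -> continuation
    if (src->klass->reference_type == 0) return; // not a Reference subclass
    if (!satb_marking_active) return;            // concurrent marking not running
    if (pre_val == nullptr) return;              // null previous value
    g1_pre_barrier(pre_val);                     // record the previous referent
}

int main() {
    Klass weak_ref_klass{1};                     // hypothetical non-REF_NONE type
    oop_t ref{&weak_ref_klass};
    oop_t old_referent{nullptr};
    satb_marking_active = true;
    referent_pre_barrier(&ref, &old_referent);   // all checks pass: enqueues
}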

View file

@@ -2956,6 +2956,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
+  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
@@ -2984,9 +2985,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
+  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
+      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();

View file

@@ -515,9 +515,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
-    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
-    ConstantPoolCacheEntry::verify_tosBits();
+    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
+    // Make sure we don't need to mask G1_scratch after the above shift
+    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);

View file

@@ -514,7 +514,6 @@ frame frame::sender(RegisterMap* map) const {
  // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
  // explicitly recognized.

-  if (is_ricochet_frame())  return sender_for_ricochet_frame(map);
  bool frame_is_interpreted = is_interpreted_frame();
  if (frame_is_interpreted) {
@@ -821,9 +820,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
  }

-  if (is_ricochet_frame()) {
-    MethodHandles::RicochetFrame::describe(this, values, frame_no);
-  } else if (is_interpreted_frame()) {
+  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_padding);

View file

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -75,4 +75,43 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread

+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+                                                                           \
+  product(intx, UseVIS, 99,                                                \
+          "Highest supported VIS instructions set on Sparc")               \
+                                                                           \
+  product(bool, UseCBCond, false,                                          \
+          "Use compare and branch instruction on SPARC")                   \
+                                                                           \
+  product(bool, UseBlockZeroing, false,                                    \
+          "Use special cpu instructions for block zeroing")                \
+                                                                           \
+  product(intx, BlockZeroingLowLimit, 2048,                                \
+          "Minimum size in bytes when block zeroing will be used")         \
+                                                                           \
+  product(bool, UseBlockCopy, false,                                       \
+          "Use special cpu instructions for block copy")                   \
+                                                                           \
+  product(intx, BlockCopyLowLimit, 2048,                                   \
+          "Minimum size in bytes when block copy will be used")            \
+                                                                           \
+  develop(bool, UseV8InstrsOnly, false,                                    \
+          "Use SPARC-V8 Compliant instruction subset")                     \
+                                                                           \
+  product(bool, UseNiagaraInstrs, false,                                   \
+          "Use Niagara-efficient instruction subset")                      \
+                                                                           \
+  develop(bool, UseCASForSwap, false,                                      \
+          "Do not use swap instructions, but only CAS (in a loop) on SPARC")\
+                                                                           \
+  product(uintx, ArraycopySrcPrefetchDistance, 0,                          \
+          "Distance to prefetch source array in arraycopy")                \
+                                                                           \
+  product(uintx, ArraycopyDstPrefetchDistance, 0,                          \
+          "Distance to prefetch destination array in arraycopy")           \
+                                                                           \
+  develop(intx, V8AtomicOperationUnderLockSpinCount, 50,                   \
+          "Number of times to spin wait on a v8 atomic operation lock")    \
+
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
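Background on the ARCH_FLAGS block above (a generic example, not JDK source): HotSpot declares its flags with an X-macro, a single list that different macro expansions turn into declarations, default values, or printable tables. A self-contained miniature of the pattern:

#include <cstdio>

// One list of flags; "product" and "develop" are macro parameters.
#define MY_FLAGS(product, develop)                                     \
    product(bool, UseBlockZeroing, false, "special block-zero insns")  \
    develop(int,  SpinCount,       50,    "debug-only spin count")

// Expansion 1: declare one variable per flag, initialized to its default.
#define DECLARE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)
#undef DECLARE_FLAG

// Expansion 2: print every flag with its documentation string.
#define PRINT_FLAG(type, name, value, doc) std::printf("%-16s %s\n", #name, doc);
int main() {
    MY_FLAGS(PRINT_FLAG, PRINT_FLAG)
}
#undef PRINT_FLAG

The same list drives both expansions, so a flag added in one place cannot fall out of sync between its declaration and its documentation.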

View file

@@ -505,7 +505,7 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
-  ld_ptr(Lesp, param_count, recv);  // gets receiver Oop
+  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
@@ -767,8 +767,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
-  srl( bytecode, shift_count, bytecode);
-  and3(bytecode,        0xFF, bytecode);
+  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
+         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
+         "correct shift count");
+  srl(bytecode, shift_count, bytecode);
+  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
+  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}
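A worked example of the shift-and-mask in get_cache_and_index_and_bytecode_at_bcp (illustrative values only): the cache entry packs its resolved bytecodes into one word, one byte per slot, so the shift is (1 + byte_no) * BitsPerByte and the mask keeps the low eight bits:

#include <cstdint>
#include <cstdio>

int main() {
    const int BitsPerByte = 8;
    uint32_t indices = 0x00B6B200u;   // hypothetical packed word: 0xB6 in byte 2
    for (int byte_no = 1; byte_no <= 2; ++byte_no) {
        int shift = (1 + byte_no) * BitsPerByte;   // 16 for f1, 24 for f2
        uint32_t bytecode = (indices >> shift) & 0xFF;
        std::printf("byte_no %d -> bytecode 0x%02X\n", byte_no, (unsigned)bytecode);
    }
}

The new asserts simply pin this arithmetic to the named ConstantPoolCacheEntry constants instead of the bare 0xFF literal.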

View file

@@ -32,7 +32,6 @@
  address generate_normal_entry(bool synchronized);
  address generate_native_entry(bool synchronized);
  address generate_abstract_entry(void);
-  address generate_method_handle_entry(void);
  address generate_math_entry(AbstractInterpreter::MethodKind kind);
  address generate_empty_entry(void);
  address generate_accessor_entry(void);

View file

@@ -255,17 +255,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}

-// Method handle invoker
-// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
-address InterpreterGenerator::generate_method_handle_entry(void) {
-  if (!EnableInvokeDynamic) {
-    return generate_abstract_entry();
-  }
-
-  return MethodHandles::generate_method_handle_interpreter_entry(_masm);
-}
-
-
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
@@ -395,7 +384,7 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();     break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();  break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();  break;
-    case Interpreter::method_handle         : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
    case Interpreter::java_lang_math_sin     :                                                                          break;
    case Interpreter::java_lang_math_cos     :                                                                          break;
    case Interpreter::java_lang_math_tan     :                                                                          break;
@@ -407,7 +396,9 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::java_lang_math_exp     :                                                                          break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                  : ShouldNotReachHere();                                                    break;
+    default:
+      fatal(err_msg("unexpected method kind: %d", kind));
+      break;
  }

  if (entry_point) return entry_point;

File diff suppressed because it is too large.

View file

@ -30,186 +30,9 @@ enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000)) adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
}; };
public:
class RicochetFrame : public ResourceObj {
friend class MethodHandles;
private:
/*
RF field x86 SPARC
sender_pc *(rsp+0) I7-0x8
sender_link rbp I6+BIAS
exact_sender_sp rsi/r13 I5_savedSP
conversion *(rcx+&amh_conv) L5_conv
saved_args_base rax L4_sab (cf. Gargs = G4)
saved_args_layout #NULL L3_sal
saved_target *(rcx+&mh_vmtgt) L2_stgt
continuation #STUB_CON L1_cont
*/
static const Register L1_continuation ; // what to do when control gets back here
static const Register L2_saved_target ; // target method handle to invoke on saved_args
static const Register L3_saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
static const Register L4_saved_args_base ; // base of pushed arguments (slot 0, arg N) (-3)
static const Register L5_conversion ; // misc. information from original AdapterMethodHandle (-2)
frame _fr;
RicochetFrame(const frame& fr) : _fr(fr) { }
intptr_t* register_addr(Register reg) const {
assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree");
return _fr.register_addr(reg);
}
intptr_t register_value(Register reg) const { return *register_addr(reg); }
public:
intptr_t* continuation() const { return (intptr_t*) register_value(L1_continuation); }
oop saved_target() const { return (oop) register_value(L2_saved_target); }
oop saved_args_layout() const { return (oop) register_value(L3_saved_args_layout); }
intptr_t* saved_args_base() const { return (intptr_t*) register_value(L4_saved_args_base); }
intptr_t conversion() const { return register_value(L5_conversion); }
intptr_t* exact_sender_sp() const { return (intptr_t*) register_value(I5_savedSP); }
intptr_t* sender_link() const { return _fr.sender_sp(); } // XXX
address sender_pc() const { return _fr.sender_pc(); }
// This value is not used for much, but it apparently must be nonzero.
static int frame_size_in_bytes() { return wordSize * 4; }
intptr_t* extended_sender_sp() const { return saved_args_base(); }
intptr_t return_value_slot_number() const {
return adapter_conversion_vminfo(conversion());
}
BasicType return_value_type() const {
return adapter_conversion_dest_type(conversion());
}
bool has_return_value_slot() const {
return return_value_type() != T_VOID;
}
intptr_t* return_value_slot_addr() const {
assert(has_return_value_slot(), "");
return saved_arg_slot_addr(return_value_slot_number());
}
intptr_t* saved_target_slot_addr() const {
return saved_arg_slot_addr(saved_args_length());
}
intptr_t* saved_arg_slot_addr(int slot) const {
assert(slot >= 0, "");
return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
}
jint saved_args_length() const;
jint saved_arg_offset(int arg) const;
// GC interface
oop* saved_target_addr() { return (oop*)register_addr(L2_saved_target); }
oop* saved_args_layout_addr() { return (oop*)register_addr(L3_saved_args_layout); }
oop compute_saved_args_layout(bool read_cache, bool write_cache);
#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
static const Register L0_magic_number_1 ; // cookie for debugging, at start of RSA
static Address magic_number_2_addr() { return Address(L4_saved_args_base, -wordSize); }
intptr_t magic_number_1() const { return register_value(L0_magic_number_1); }
intptr_t magic_number_2() const { return saved_args_base()[-1]; }
#endif //ASSERT
public:
enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
static void generate_ricochet_blob(MacroAssembler* _masm,
// output params:
int* bounce_offset,
int* exception_offset,
int* frame_size_in_words);
static void enter_ricochet_frame(MacroAssembler* _masm,
Register recv_reg,
Register argv_reg,
address return_handler);
static void leave_ricochet_frame(MacroAssembler* _masm,
Register recv_reg,
Register new_sp_reg,
Register sender_pc_reg);
static RicochetFrame* from_frame(const frame& fr) {
RicochetFrame* rf = new RicochetFrame(fr);
rf->verify();
return rf;
}
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
};
// Additional helper methods for MethodHandles code generation: // Additional helper methods for MethodHandles code generation:
public: public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg); static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg);
static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
static void load_stack_move(MacroAssembler* _masm,
Address G3_amh_conversion,
Register G5_stack_move);
static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg);
static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg);
static void push_arg_slots(MacroAssembler* _masm,
Register argslot_reg,
RegisterOrConstant slot_count,
Register temp_reg, Register temp2_reg);
static void move_arg_slots_up(MacroAssembler* _masm,
Register bottom_reg, // invariant
Address top_addr, // can use temp_reg
RegisterOrConstant positive_distance_in_slots,
Register temp_reg, Register temp2_reg);
static void move_arg_slots_down(MacroAssembler* _masm,
Address bottom_addr, // can use temp_reg
Register top_reg, // invariant
RegisterOrConstant negative_distance_in_slots,
Register temp_reg, Register temp2_reg);
static void move_typed_arg(MacroAssembler* _masm,
BasicType type, bool is_element,
Address value_src, Address slot_dest,
Register temp_reg);
static void move_return_value(MacroAssembler* _masm, BasicType type,
Address return_slot);
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
Register temp_reg,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_argslots(MacroAssembler* _masm,
RegisterOrConstant argslot_count,
Register argslot_reg,
Register temp_reg,
Register temp2_reg,
bool negate_argslot,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_stack_move(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int direction) NOT_DEBUG_RETURN;
static void verify_klass(MacroAssembler* _masm, static void verify_klass(MacroAssembler* _masm,
Register obj_reg, KlassHandle klass, Register obj_reg, KlassHandle klass,
@ -223,8 +46,17 @@ public:
"reference is a MH"); "reference is a MH");
} }
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
  // Takes care of special dispatch from single stepping too.
  static void jump_from_method_handle(MacroAssembler* _masm, Register method,
Register temp, Register temp2,
bool for_compiler_entry);
static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry);
  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
View file
@@ -400,13 +400,13 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
#ifndef _LP64
      else                            stk_reg_pairs++;
#endif
@@ -416,11 +416,11 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max)  int_reg_cnt++;
      else                            stk_reg_pairs++;
      break;

    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max)  flt_reg_cnt++;
      else                            stk_reg_pairs++;
      break;
    case T_DOUBLE:
@@ -436,7 +436,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round
  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
@@ -517,24 +516,15 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
        stk_reg_pairs += 2;
      }
#else // COMPILER2
if (int_reg_pairs + 1 < int_reg_max) {
if (is_outgoing) {
regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
} else {
regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
}
int_reg_pairs += 2;
} else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
#endif // _LP64
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
@@ -886,6 +876,20 @@ void AdapterGenerator::gen_c2i_adapter(
  __ delayed()->add(SP, G1, Gargs);
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ set(ExternalAddress(code_start), temp_reg);
__ set(pointer_delta(code_end, code_start, 1), temp2_reg);
__ cmp(pc_reg, temp_reg);
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
__ delayed()->add(temp_reg, temp2_reg, temp_reg);
__ cmp(pc_reg, temp_reg);
__ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
__ bind(L_fail);
}
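// Editor's sketch, not part of the patch: what the two branches above compute,
// assuming plain byte addresses; the check passes only for
// code_start < pc < code_end:
//
//   static bool pc_in_range(address pc, address code_start, address code_end) {
//     return (uintptr_t)pc > (uintptr_t)code_start &&
//            (uintptr_t)pc < (uintptr_t)code_end;
//   }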
void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
@@ -907,6 +911,51 @@ void AdapterGenerator::gen_i2c_adapter(
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
// More detail:
// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
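  // Editor's worked example of the chains described above (not in the patch):
  //   compiled A -> c2i -> interpreted B      : safe, B repairs A's SP via sender_sp
  //   interpreted A -> i2c -> compiled B      : safe, A repairs its own SP from
  //                                             interpreter_frame_last_sp
  //   compiled A -> c2i -> i2c -> compiled B  : unsafe, neither end repairs the
  //                                             adapters' SP changes; this is the
  //                                             case the check below guards against.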
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, O7, O0, O1,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, O7, O0, O1,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, O7, O0, O1,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
    __ block_comment("} verify_i2c ");
}
  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.
@@ -1937,20 +1986,156 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
  __ bind(done);
}
static void verify_oop_args(MacroAssembler* masm,
int total_args_passed,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = G5_method; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
__ ld_ptr(SP, ld_off, temp_reg);
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
static void gen_special_dispatch(MacroAssembler* masm,
int total_args_passed,
int comp_args_on_stack,
vmIntrinsics::ID special_dispatch,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, total_args_passed, sig_bt, regs);
// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = G5_method; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (special_dispatch == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
fatal(err_msg("special_dispatch=%d", special_dispatch));
}
if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
__ ld_ptr(SP, ld_off, member_reg);
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}
if (has_receiver) {
// Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = G3_scratch; // known to be free at this point
RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
__ ld_ptr(SP, ld_off, receiver_reg);
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//   call into JVM and possibly unlock the JNI critical
//   if a GC was suppressed while in the critical native.
// transition back to thread_in_Java
// return to caller
//
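// Editor's sketch, not part of the patch: the Java/JNI side such a wrapper
// serves.  Class and method names are hypothetical.  A critical native drops
// the JNIEnv*/jclass parameters and receives each array argument as a
// (length, pointer) pair; it must not call back into the JNI:
//
//   #include <jni.h>
//   // Java:  static native int sum(int[] a);
//   extern "C" JNIEXPORT jint JNICALL
//   JavaCritical_Demo_sum(jint len, jint* a) {
//     jint s = 0;
//     for (jint i = 0; i < len; i++) s += a[i];  // direct access, no GetIntArrayElements
//     return s;
//   }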
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                int total_in_args,
                                                int comp_args_on_stack, // in VMRegStackSlots
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
total_in_args,
comp_args_on_stack,
method->intrinsic_id(),
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
View file
@@ -3404,14 +3404,6 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::_atomic_add_ptr_entry     = StubRoutines::_atomic_add_entry;
#endif  // COMPILER2 !=> _LP64
// Build this early so it's available for the interpreter. The
// stub expects the required and actual type to already be in O1
// and O2 respectively.
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
G5_method_type, G3_method_handle);
    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  }
View file
@@ -694,9 +694,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
    // Make sure we don't need to mask G1_scratch after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
@@ -1662,7 +1662,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
    *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  } else {
    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
    // Don't have Lesp available; lay out locals block in the caller
    // adjacent to the register window save area.
    //
View file
@@ -378,7 +378,7 @@ void TemplateTable::fast_aldc(bool wide) {
  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;
  resolve_cache_and_index(f12_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);
@@ -2093,10 +2093,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
  // Depends on cpCacheOop layout!
  Label resolved;
  if (byte_no == f12_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    // The caller will also load a methodOop from f2.
    assert(result != noreg, "");
    assert_different_registers(result, Rcache);
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
@@ -2123,10 +2125,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
      break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
@@ -2139,48 +2144,54 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
}
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      constantPoolCacheOopDesc::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else if (byte_no == f12_oop) {
    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
    __ ld_ptr(Address(cache, index_offset), method);
    itable_index = noreg;  // hack to disable load below
  } else {
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ ld_ptr(Address(cache, method_offset), method);
  }
  if (itable_index != noreg) {
    // pick up itable index from f2 also:
    assert(byte_no == f1_byte, "already picked up f1");
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}
// The Rcache register must be set before call
@@ -2272,7 +2283,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }
@@ -2280,9 +2291,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  // compute field type
  Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
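  // Editor's note, not part of the patch: the tos_state_shift pattern used
  // throughout this change relies on the TosState living in the topmost bits
  // of the cached flags word (layout sketch, field names per
  // ConstantPoolCacheEntry):
  //
  //   flags: [ tos_state | is_final | is_volatile | ... | parameter_size ]
  //
  // so `flags >> tos_state_shift` already yields the state with nothing above
  // it to mask off -- which is exactly what verify_tos_state_shift asserts.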
@@ -2445,7 +2456,7 @@ void TemplateTable::fast_accessfield(TosState state) {
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }
  switch (bytecode()) {
@@ -2569,9 +2580,9 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
    Label two_word, valsizeknown;
    __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ mov(Lesp, G4_scratch);
    __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
    // Make sure we don't need to mask Rflags after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(Rflags, ltos);
    __ br(Assembler::equal, false, Assembler::pt, two_word);
    __ delayed()->cmp(Rflags, dtos);
@@ -2625,7 +2636,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
@@ -2635,9 +2646,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
@@ -2833,7 +2844,7 @@ void TemplateTable::fast_storefield(TosState state) {
  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
@@ -2916,7 +2927,7 @@ void TemplateTable::fast_xaccess(TosState state) {
  // Test volatile
  Label notVolatile;
  __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  __ btst(Rflags, Lscratch);
  __ br(Assembler::zero, false, Assembler::pt, notVolatile);
  __ delayed()->nop();
@@ -2936,27 +2947,82 @@ void TemplateTable::count_calls(Register method, Register temp) {
  ShouldNotReachHere();
}
void TemplateTable::prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register ra, // return address
Register index, // itable index, MethodType, etc.
Register recv, // if caller wants to see it
Register flags // if caller wants to test it
) {
// determine flags
const Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokehandle = code == Bytecodes::_invokehandle;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = (recv != noreg);
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
assert(recv == noreg || recv == O0, "");
assert(flags == noreg || flags == O1, "");
// setup registers & access constant pool cache
if (recv == noreg) recv = O0;
if (flags == noreg) flags = O1;
const Register temp = O2;
assert_different_registers(method, ra, index, recv, flags, temp);
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// maybe push appendix to arguments
if (is_invokedynamic || is_invokehandle) {
Label L_no_push;
__ verify_oop(index);
__ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
__ btst(flags, temp);
__ br(Assembler::zero, false, Assembler::pt, L_no_push);
__ delayed()->nop();
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
__ push_ptr(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
// load receiver if needed (after appendix is pushed so parameter size is correct)
if (load_receiver) {
__ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
__ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
__ verify_oop(recv);
}
// compute return type
__ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
// Make sure we don't need to mask flags after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
// load return address
{
const address table_addr = (is_invokeinterface || is_invokedynamic) ?
(address)Interpreter::return_5_addrs_by_index_table() :
(address)Interpreter::return_3_addrs_by_index_table();
AddressLiteral table(table_addr);
__ set(table, temp);
__ sll(ra, LogBytesPerWord, ra);
__ ld_ptr(Address(temp, ra), ra);
}
}
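// Editor's note, not part of the patch: the net effect of prepare_invoke, as
// pseudo-C++ over the cached entry (f1_or_f2 is an invented name for "f1 or
// f2, whichever method_offset selected"; other names per
// ConstantPoolCacheEntry):
//
//   method = cache->f1_or_f2();                  // linked method / i-klass
//   if (flags & (1 << has_appendix_shift))
//     push(index);                               // trailing MethodType/CallSite
//   recv  = stack[flags & parameter_size_mask];  // if requested
//   ra    = return_table[flags >> tos_state_shift];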
void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target methodOop & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
if (vtableEntry::size() % 3 == 0) {
// scale the vtable index by 12:
int one_third = vtableEntry::size() / 3;
__ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
__ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
__ add(Rindex, Rtemp, Rindex);
} else {
// scale the vtable index by 8:
__ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
}
__ add(Rrecv, Rindex, Rrecv);
__ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}
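// Editor's note: lookup_virtual_method replaces the hand-scaled indexing that
// was open-coded here (and in vtableStubs_sparc.cpp below); per the removed
// code it amounts to:
//
//   G5_method = recv_klass[ instanceKlass::vtable_start_offset() * wordSize
//                           + vtable_index * vtableEntry::size() * wordSize
//                           + vtableEntry::method_offset_in_bytes() ];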
@@ -2965,16 +3031,16 @@ void TemplateTable::invokevirtual(int byte_no) {
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters
@@ -2986,27 +3052,27 @@ void TemplateTable::invokevirtual(int byte_no) {
  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_oop(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}
void TemplateTable::fast_invokevfinal(int byte_no) {
@@ -3036,9 +3102,9 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address
@@ -3047,65 +3113,37 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);
__ verify_oop(G5_method);
__ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0);
// receiver NULL check
__ null_check(O0);
__ profile_call(O4);
// get return address
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
  // do the call
  __ verify_oop(G5_method);
  __ profile_call(O4);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;
Register Rret = Lscratch;
  prepare_invoke(byte_no, G5_method, Rret);  // get f1 methodOop
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
__ verify_oop(G5_method);
__ profile_call(O4);
// get return address
AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
  // do the call
  __ verify_oop(G5_method);
  __ profile_call(O4);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
@@ -3122,7 +3160,7 @@ void TemplateTable::invokeinterface_object_method(Register RklassOop,
  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();
@@ -3144,53 +3182,37 @@ void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface  = G1_scratch;
  const Register Rret        = G3_scratch;
  const Register Rindex      = Lscratch;
  const Register O0_recv     = O0;
  const Register O1_flags    = O1;
  const Register O2_klassOop = O2;
  const Register Rscratch    = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// get receiver
__ and3(Rflags, 0xFF, Rscratch); // gets number of parameters
__ load_receiver(Rscratch, O0);
__ verify_oop(O0);
__ mov(Rflags, Rret);
// get return address
AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
__ set(table, Rscratch);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rscratch, Rret, Rret); // get return address
  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_klassOop);
  __ verify_oop(O2_klassOop);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_klassOop, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  __ profile_virtual_call(O2_klassOop, O4);

  //
  // find entry point to call
@@ -3199,9 +3221,9 @@ void TemplateTable::invokeinterface(int byte_no) {
  // compute start of first itableOffsetEntry (which is at end of vtable)
  const int base = instanceKlass::vtable_start_offset() * wordSize;
  Label search;
  Register Rtemp = O1_flags;

  __ ld(O2_klassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
  if (align_object_offset(1) > 1) {
    __ round_to(Rtemp, align_object_offset(1));
  }
@@ -3212,7 +3234,7 @@ void TemplateTable::invokeinterface(int byte_no) {
    __ set(base, Rscratch);
    __ add(Rscratch, Rtemp, Rtemp);
  }
  __ add(O2_klassOop, Rtemp, Rscratch);

  __ bind(search);
@@ -3244,7 +3266,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);       // Rindex *= 8;
  __ add(Rscratch, Rindex, Rscratch);
  __ ld_ptr(O2_klassOop, Rscratch, G5_method);
  // Check for abstract method error.
  {
@@ -3260,13 +3282,42 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ verify_oop(G5_method);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f12_oop, "use this argument");
if (!EnableInvokeDynamic) {
// rewriter does not generate this bytecode
__ should_not_reach_here();
return;
}
const Register Rret = Lscratch;
const Register G4_mtype = G4_scratch; // f1
const Register O0_recv = O0;
const Register Rscratch = G3_scratch;
prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
__ null_check(O0_recv);
// G4: MethodType object (from f1)
// G5: MH.linkToCallSite method (from f2)
// Note: G4_mtype is already pushed (if necessary) by prepare_invoke
// do the call
__ verify_oop(G5_method);
__ profile_final_call(O4); // FIXME: profile the LambdaForm also
__ call_from_interpreter(Rscratch, Gargs, Rret);
} }
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f12_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3279,42 +3330,24 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }
  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
Register Rscratch = G3_scratch;
Register Rtemp = G1_scratch;
Register Rret = Lscratch;
  // G4: CallSite object (from f1)
  // G5: MH.linkToCallSite method (from f2)
__ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
// Note: G4_callsite is already pushed by prepare_invoke
// %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G5_method);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
__ verify_oop(G5_callsite);
__ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
__ null_check(G3_method_handle);
__ verify_oop(G3_method_handle);
// Adjust Rret first so Llast_SP can be same as Rret
__ add(Rret, -frame::pc_return_offset, O7);
__ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
__ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
// Record SP so we can remove any stack space allocated by adapter transition
__ delayed()->mov(SP, Llast_SP);
}
View file
@@ -25,6 +25,13 @@
#ifndef CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP
#define CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register ra, // return address
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // if caller wants to see it
Register flags = noreg // if caller wants to test it
);
  // helper function
  static void invokevfinal_helper(Register Rcache, Register Rret);
  static void invokeinterface_object_method(Register RklassOop, Register Rcall,
View file
@@ -106,10 +106,10 @@ void VM_Version::initialize() {
  if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
    FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
  }
  // When using CMS or G1, we cannot use memset() in BOT updates
  // because the sun4v/CMT version in libc_psr uses BIS which
  // exposes "phantom zeros" to concurrent readers.  See 6948537.
  if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
    FLAG_SET_DEFAULT(UseMemSetInBOT, false);
  }
#ifdef _LP64
View file
@@ -44,10 +44,11 @@ protected:
    fmaf_instructions    = 10,
    fmau_instructions    = 11,
    vis3_instructions    = 12,
    cbcond_instructions  = 13,
    sparc64_family       = 14,
    M_family             = 15,
    T_family             = 16,
    T1_model             = 17
  };
  enum Feature_Flag_Set {
@@ -67,10 +68,11 @@ protected:
    fmaf_instructions_m     = 1 << fmaf_instructions,
    fmau_instructions_m     = 1 << fmau_instructions,
    vis3_instructions_m     = 1 << vis3_instructions,
    cbcond_instructions_m   = 1 << cbcond_instructions,
    sparc64_family_m        = 1 << sparc64_family,
    M_family_m              = 1 << M_family,
    T_family_m              = 1 << T_family,
    T1_model_m              = 1 << T1_model,

    generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
    generic_v9_m        = generic_v8_m | v9_instructions_m,
@@ -89,6 +91,7 @@ protected:
  static int platform_features(int features);

  // Returns true if the platform is in the niagara line (T series)
  static bool is_M_family(int features) { return (features & M_family_m) != 0; }
  static bool is_T_family(int features) { return (features & T_family_m) != 0; }
  static bool is_niagara() { return is_T_family(_features); }
  DEBUG_ONLY( static bool is_niagara(int features) { return (features & sun4v_m) != 0; } )
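  // Editor's sketch (hypothetical feature word, not from the patch): a sun4v
  // M-series CPU with cbcond support might report
  //
  //   int features = generic_v9_m | sun4v_m | cbcond_instructions_m | M_family_m;
  //   assert(is_M_family(features) && !is_T_family(features), "M, not T");
  //
  // each predicate above tests exactly one bit of the mask enum.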
View file
@@ -70,7 +70,6 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  __ load_klass(O0, G3_scratch);

  // set methodOop (in case of interpreted method), and destination address
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
@@ -82,13 +81,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    __ bind(L);
  }
#endif
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
__ ld_ptr(G3, v_off, G5_method);
} else {
__ set(v_off,G5);
__ ld_ptr(G3, G5, G5_method);
}
#ifndef PRODUCT
  if (DebugVtables) {
File diff suppressed because it is too large
View file
@@ -617,6 +617,7 @@ private:
                   VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
    simd_prefix(dst, xnoreg, src, pre, opc);
  }

  void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
    simd_prefix(src, dst, pre);
  }
@@ -626,16 +627,10 @@ private:
    simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
  }

  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
                             VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
                             bool rex_w = false, bool vector256 = false);
int simd_prefix_and_encode(XMMRegister dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(dst, xnoreg, src, pre, opc);
}
  // Move/convert 32-bit integer value.
  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
                             VexSimdPrefix pre) {
@@ -677,6 +672,15 @@ private:
  void emit_arith(int op1, int op2, Register dst, jobject obj);
  void emit_arith(int op1, int op2, Register dst, Register src);
void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
Address src, VexSimdPrefix pre, bool vector256);
void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
XMMRegister src, VexSimdPrefix pre, bool vector256);
  void emit_operand(Register reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
@@ -891,12 +895,6 @@ private:
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);
// Bitwise Logical AND of Packed Double-Precision Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical AND of Packed Single-Precision Floating-Point Values
void andps(XMMRegister dst, XMMRegister src);
  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);
@@ -1436,10 +1434,6 @@ private:
  void prefetcht2(Address src);
  void prefetchw(Address src);
// POR - Bitwise logical OR
void por(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, Address src);
  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);
@@ -1448,9 +1442,6 @@ private:
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode);
// Shift Right by bits Logical Quadword Immediate
void psrlq(XMMRegister dst, int shift);
  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
@@ -1475,10 +1466,6 @@ private:
  void pushq(Address src);
// Xor Packed Byte Integer Values
void pxor(XMMRegister dst, Address src);
void pxor(XMMRegister dst, XMMRegister src);
  void rcll(Register dst, int imm8);
  void rclq(Register dst, int imm8);
@@ -1601,15 +1588,10 @@ private:
  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
void xorps(XMMRegister dst, XMMRegister src);
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0

// AVX 3-operands scalar instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
@@ -1627,14 +1609,147 @@ private:
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
// AVX Vector instrucitons.
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);

//====================VECTOR ARITHMETIC=====================================

// Add Packed Floating-Point Values
void addpd(XMMRegister dst, XMMRegister src);
void addps(XMMRegister dst, XMMRegister src);
void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Subtract Packed Floating-Point Values
void subpd(XMMRegister dst, XMMRegister src);
void subps(XMMRegister dst, XMMRegister src);
void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Multiply Packed Floating-Point Values
void mulpd(XMMRegister dst, XMMRegister src);
void mulps(XMMRegister dst, XMMRegister src);
void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Divide Packed Floating-Point Values
void divpd(XMMRegister dst, XMMRegister src);
void divps(XMMRegister dst, XMMRegister src);
void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Bitwise Logical AND of Packed Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
void andps(XMMRegister dst, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Bitwise Logical XOR of Packed Floating-Point Values
void xorpd(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Add packed integers
void paddb(XMMRegister dst, XMMRegister src);
void paddw(XMMRegister dst, XMMRegister src);
void paddd(XMMRegister dst, XMMRegister src);
void paddq(XMMRegister dst, XMMRegister src);
void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Sub packed integers
void psubb(XMMRegister dst, XMMRegister src);
void psubw(XMMRegister dst, XMMRegister src);
void psubd(XMMRegister dst, XMMRegister src);
void psubq(XMMRegister dst, XMMRegister src);
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Multiply packed integers (only shorts and ints)
void pmullw(XMMRegister dst, XMMRegister src);
void pmulld(XMMRegister dst, XMMRegister src);
void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Shift left packed integers
void psllw(XMMRegister dst, int shift);
void pslld(XMMRegister dst, int shift);
void psllq(XMMRegister dst, int shift);
void psllw(XMMRegister dst, XMMRegister shift);
void pslld(XMMRegister dst, XMMRegister shift);
void psllq(XMMRegister dst, XMMRegister shift);
void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// Logical shift right packed integers
void psrlw(XMMRegister dst, int shift);
void psrld(XMMRegister dst, int shift);
void psrlq(XMMRegister dst, int shift);
void psrlw(XMMRegister dst, XMMRegister shift);
void psrld(XMMRegister dst, XMMRegister shift);
void psrlq(XMMRegister dst, XMMRegister shift);
void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
void psraw(XMMRegister dst, int shift);
void psrad(XMMRegister dst, int shift);
void psraw(XMMRegister dst, XMMRegister shift);
void psrad(XMMRegister dst, XMMRegister shift);
void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
// And packed integers
void pand(XMMRegister dst, XMMRegister src);
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Or packed integers
void por(XMMRegister dst, XMMRegister src);
void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Xor packed integers
void pxor(XMMRegister dst, XMMRegister src);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
// Copy low 128bit into high 128bit of YMM registers.
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
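The two-operand SSE forms above (addpd, pand, pxor, ...) are destructive: dst = dst OP src. The VEX-encoded v-forms take a separate nds source and leave both inputs intact, and vector256 selects the 256-bit ymm encoding. A minimal user-level sketch of the same distinction through compiler intrinsics, assuming an AVX-capable toolchain (e.g. gcc/clang with -mavx); the values are arbitrary:

#include <immintrin.h>
#include <cstdio>

int main() {
  // SSE2 form: two operands, destructive -- dst = dst OP src (what addpd encodes).
  __m128d a = _mm_set_pd(2.0, 1.0);
  __m128d b = _mm_set_pd(20.0, 10.0);
  a = _mm_add_pd(a, b);                  // 'a' is overwritten, like addpd xmm0, xmm1

  // AVX/VEX form: three operands, non-destructive -- dst = nds OP src
  // (what vaddpd encodes; a 256-bit ymm operand is the vector256 == true case).
  __m256d x = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);
  __m256d y = _mm256_set_pd(40.0, 30.0, 20.0, 10.0);
  __m256d z = _mm256_add_pd(x, y);       // both inputs preserved, like vaddpd ymm2, ymm0, ymm1

  double out[4];
  _mm256_storeu_pd(out, z);
  printf("%.0f %.0f %.0f %.0f\n", out[0], out[1], out[2], out[3]);  // 11 22 33 44
  return 0;
}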
@@ -1940,6 +2055,7 @@ class MacroAssembler: public Assembler {
void load_heap_oop(Register dst, Address src);
void load_heap_oop_not_null(Register dst, Address src);
void store_heap_oop(Address dst, Register src);
void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);
// Used for storing NULL. All other oop constants should be
// stored using routines that take a jobject.
@@ -2117,6 +2233,11 @@ class MacroAssembler: public Assembler {
                  Register scan_temp,
                  Label& no_such_interface);
// virtual method calling
void lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result);
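lookup_virtual_method turns a receiver klass plus a link-time vtable index into a concrete call target. A hypothetical miniature of that computation, with toy Klass/Method types standing in for the VM's metadata:

#include <cstdio>

struct Method { const char* name; };
struct Klass  { Method* vtable[4]; };

// method_result = klass->vtable[index]; the index is fixed at link time,
// the klass (and hence the target) varies per receiver at call time.
static Method* lookup_virtual_method(Klass* recv_klass, int vtable_index) {
  return recv_klass->vtable[vtable_index];
}

int main() {
  Method to_string = { "Sub::toString" };
  Klass sub = {};                 // zero-fill the table
  sub.vtable[0] = &to_string;     // slot 0 assigned at "class link" time
  printf("%s\n", lookup_virtual_method(&sub, 0)->name);
  return 0;
}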
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
@@ -2152,15 +2273,8 @@ class MacroAssembler: public Assembler {
                  Label& L_success);

// method handles (JSR 292)
void check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type);
void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
Register temp_reg);
void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

//----
void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
@@ -2179,8 +2293,13 @@ class MacroAssembler: public Assembler {
// prints msg and continues
void warn(const char* msg);
// dumps registers and other state
void print_state();
static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
static void debug64(char* msg, int64_t pc, int64_t regs[]);
static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
static void print_state64(int64_t pc, int64_t regs[]);
void os_breakpoint();
@@ -2528,11 +2647,13 @@ public:
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandpd(dst, nds, src); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vandps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandps(dst, nds, src); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
@@ -2561,12 +2682,12 @@ public:
// AVX Vector instructions
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
@@ -2574,6 +2695,12 @@ public:
  else
    Assembler::vxorpd(dst, nds, src, vector256);
}
void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
Assembler::vpxor(dst, nds, src, vector256);
else
Assembler::vxorpd(dst, nds, src, vector256);
}
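The guard in both vpxor wrappers is an ISA-level fallback: the 256-bit integer form of vpxor is AVX2-only, while a 256-bit vxorpd is already legal on AVX1 and produces identical bits (only the forwarding domain differs). A plain C++ sketch of that dispatch decision; IsaLevel and pick_xor are invented names standing in for the UseAVX check:

#include <cstdio>

enum IsaLevel { SSE2 = 0, AVX1 = 1, AVX2 = 2 };  // illustrative stand-in for UseAVX

// Same decision as the guard above: 256-bit vpxor requires AVX2; on AVX1 a
// 256-bit vxorpd computes the same bitwise result, so it serves as fallback.
static const char* pick_xor(IsaLevel level, bool vector256) {
  if (level >= AVX2 || !vector256)
    return "vpxor";
  return "vxorpd";
}

int main() {
  printf("%s\n", pick_xor(AVX1, true));   // vxorpd: 256-bit integer form unavailable
  printf("%s\n", pick_xor(AVX2, true));   // vpxor
  printf("%s\n", pick_xor(AVX1, false));  // vpxor: the 128-bit form is AVX1-legal
  return 0;
}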
// Move packed integer values from low 128 bit to hign 128 bit in 256 bit vector.
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {

View file

@@ -488,68 +488,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
}
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that offset == referent_offset.
//
// So we might have to emit:
// if (src == null) goto continuation.
//
// and we definitely have to emit:
// if (klass(src).reference_type == REF_NONE) goto continuation
// if (!marking_active) goto continuation
// if (pre_val == null) goto continuation
// call pre_barrier(pre_val)
// goto continuation
//
__ bind(_entry);
assert(src()->is_register(), "sanity");
Register src_reg = src()->as_register();
if (gen_src_check()) {
// The original src operand was not a constant.
// Generate src == null?
__ cmpptr(src_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
}
// Generate src->_klass->_reference_type == REF_NONE)?
assert(tmp()->is_register(), "sanity");
Register tmp_reg = tmp()->as_register();
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpb(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);
// Is marking active?
assert(thread()->is_register(), "precondition");
Register thread_reg = thread()->as_pointer_register();
Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ jcc(Assembler::equal, _continuation);
// val == null?
assert(val()->is_register(), "Precondition.");
Register val_reg = val()->as_register();
__ cmpptr(val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
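The deleted stub emitted a chain of early-out tests before reaching the slow runtime call, as its header comment spells out. A compact sketch of that guard chain in plain C++; the oop typedef and ReferenceType values are illustrative stand-ins:

#include <cstddef>
#include <cstdio>

typedef void* oop;                                  // hypothetical stand-in
enum ReferenceType { REF_NONE = 0, REF_WEAK = 3 };  // illustrative subset

// Returning false corresponds to jumping to _continuation; returning true
// corresponds to falling into the g1_pre_barrier_slow call.
static bool needs_pre_barrier(oop src, oop pre_val, bool src_may_be_null,
                              bool marking_active, int reference_type) {
  if (src_may_be_null && src == NULL) return false;  // src == null
  if (reference_type == REF_NONE)     return false;  // not a Reference subclass
  if (!marking_active)                return false;  // SATB queue inactive
  if (pre_val == NULL)                return false;  // nothing to enqueue
  return true;
}

int main() {
  int dummy = 0;
  printf("%d\n", needs_pre_barrier(&dummy, &dummy, true, true,  REF_WEAK));  // 1
  printf("%d\n", needs_pre_barrier(&dummy, &dummy, true, false, REF_WEAK));  // 0
  return 0;
}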
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {

View file

@@ -3508,6 +3508,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
ciMethod* callee = op->profiled_callee();
// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
@@ -3519,9 +3520,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
Bytecodes::Code bc = method->java_code_at_bci(bci);
const bool callee_is_static = callee->is_loaded() && callee->is_static();
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
!callee_is_static && // required for optimized MH invokes
    C1ProfileVirtualCalls) {
  assert(op->recv()->is_single_cpu(), "recv must be allocated");
  Register recv = op->recv()->as_register();

View file

@@ -871,9 +871,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rdx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);

View file

@@ -439,7 +439,6 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
assert(!is_ricochet_frame(), "caller must handle this");
// frame owned by optimizing compiler
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
@@ -483,7 +482,6 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (is_ricochet_frame()) return sender_for_ricochet_frame(map);
if (_cb != NULL) {
  return sender_for_compiled_frame(map);
@@ -658,9 +656,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
    values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_ricochet_frame()) {
    MethodHandles::RicochetFrame::describe(this, values, frame_no);
  } else if (is_interpreted_frame()) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -682,12 +678,7 @@ intptr_t* frame::real_fp() const {
if (_cb != NULL) {
  // use the frame size if valid
  int size = _cb->frame_size();
  if ((size > 0) &&
      (! is_ricochet_frame())) {
    // Work-around: ricochet explicitly excluded because frame size is not
    // constant for the ricochet blob but its frame_size could not, for
    // some reasons, be declared as <= 0. This potentially confusing
    // size declaration should be fixed as another CR.
  if (size > 0) {
    return unextended_sp() + size;
  }
}

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -78,4 +78,53 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
develop(bool, IEEEPrecision, true, \
"Enables IEEE precision (for INTEL only)") \
\
product(intx, FenceInstruction, 0, \
"(Unsafe,Unstable) Experimental") \
\
product(intx, ReadPrefetchInstr, 0, \
"Prefetch instruction to prefetch ahead") \
\
product(bool, UseStoreImmI16, true, \
"Use store immediate 16-bits value instruction on x86") \
\
product(intx, UseAVX, 99, \
"Highest supported AVX instructions set on x86/x64") \
\
diagnostic(bool, UseIncDec, true, \
"Use INC, DEC instructions on x86") \
\
product(bool, UseNewLongLShift, false, \
"Use optimized bitwise shift left") \
\
product(bool, UseAddressNop, false, \
"Use '0F 1F [addr]' NOP instructions on x86 cpus") \
\
product(bool, UseXmmLoadAndClearUpper, true, \
"Load low part of XMM register and clear upper part") \
\
product(bool, UseXmmRegToRegMoveAll, false, \
"Copy all XMM register bits when moving value between registers") \
\
product(bool, UseXmmI2D, false, \
"Use SSE2 CVTDQ2PD instruction to convert Integer to Double") \
\
product(bool, UseXmmI2F, false, \
"Use SSE2 CVTDQ2PS instruction to convert Integer to Float") \
\
product(bool, UseUnalignedLoadStores, false, \
"Use SSE2 MOVDQU instruction for Arraycopy") \
\
/* assembler */ \
product(bool, Use486InstrsOnly, false, \
"Use 80486 Compliant instruction subset") \
\
product(bool, UseCountLeadingZerosInstruction, false, \
"Use count leading zeros instruction") \
#endif // CPU_X86_VM_GLOBALS_X86_HPP
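ARCH_FLAGS is HotSpot's X-macro pattern: the flag table is a macro whose parameters (develop, product, diagnostic, ...) are themselves macros supplied by the includer, so one table of rows can expand into declarations, definitions, or registration code. A stripped-down sketch of the mechanics; MY_FLAGS and the two row handlers are invented for illustration:

#include <cstdio>

typedef long intx;  // stand-in for HotSpot's intx

// An invented, two-row stand-in for ARCH_FLAGS: each row is a call to
// whichever handler macro the includer supplies for that category.
#define MY_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
  product(bool, UseStoreImmI16, true,                                     \
          "Use store immediate 16-bits value instruction on x86")         \
  product(intx, UseAVX, 99,                                               \
          "Highest supported AVX instructions set on x86/x64")

// One possible expansion: turn each product(...) row into a global definition.
#define DEFINE_PRODUCT_FLAG(type, name, value, doc) type name = value;
#define IGNORE_FLAG(type, name, value, doc)

MY_FLAGS(IGNORE_FLAG, DEFINE_PRODUCT_FLAG, IGNORE_FLAG, IGNORE_FLAG, IGNORE_FLAG)

int main() {
  printf("UseAVX=%ld UseStoreImmI16=%d\n", (long)UseAVX, (int)UseStoreImmI16);
  return 0;
}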

View file

@@ -253,8 +253,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
"correct shift count");
shrptr(bytecode, shift_count);
andptr(bytecode, 0xFF);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andptr(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}
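The change replaces the hard-coded 0xFF with a named mask, but the operation is still a shift-and-mask byte extract from the packed indices word. A standalone illustration; the shift values assume (1 + byte_no) * BitsPerByte with byte_no = 1 (f1) and 2 (f2):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed layout: the cached bytecodes live in one 32-bit indices word,
  // at shifts (1 + byte_no) * BitsPerByte, i.e. 16 for f1 and 24 for f2.
  const int      bytecode_1_shift = 16;
  const int      bytecode_2_shift = 24;
  const uint32_t bytecode_mask    = 0xFF;  // one byte per cached bytecode

  uint32_t indices = 0;
  indices |= 0xB6u << bytecode_1_shift;    // pretend invokevirtual cached as bytecode 1
  indices |= 0xB9u << bytecode_2_shift;    // pretend invokeinterface cached as bytecode 2

  uint32_t bc1 = (indices >> bytecode_1_shift) & bytecode_mask;  // shrptr + andptr
  uint32_t bc2 = (indices >> bytecode_2_shift) & bytecode_mask;
  printf("bc1=0x%X bc2=0x%X\n", bc1, bc2);  // bc1=0xB6 bc2=0xB9
  return 0;
}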

View file

@@ -256,8 +256,12 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
// little-endian machines allow us that.
movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
"correct shift count");
shrl(bytecode, shift_count);
andl(bytecode, 0xFF);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

View file

@@ -35,7 +35,6 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_method_handle_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);

View file

@@ -243,18 +243,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
// Method handle invoker
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableInvokeDynamic) {
return generate_abstract_entry();
}
address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in

View file

@@ -325,19 +325,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
// Method handle invoker
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableInvokeDynamic) {
return generate_abstract_entry();
}
address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
return entry_point;
}
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {

File diff suppressed because it is too large Load diff

View file

@@ -27,266 +27,12 @@
// Adapters
enum /* platform_dependent_constants */ {
  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000))
  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};
public:
// The stack just after the recursive call from a ricochet frame
// looks something like this. Offsets are marked in words, not bytes.
// rsi (r13 on LP64) is part of the interpreter calling sequence
// which tells the callee where is my real rsp (for frame walking).
// (...lower memory addresses)
// rsp: [ return pc ] always the global RicochetBlob::bounce_addr
// rsp+1: [ recursive arg N ]
// rsp+2: [ recursive arg N-1 ]
// ...
// rsp+N: [ recursive arg 1 ]
// rsp+N+1: [ recursive method handle ]
// ...
// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame)
// rbp-5: [ saved target MH ] the MH we will call on the saved args
// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout
// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0)
// rbp-2: [ conversion ] information about how the return value is used
// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame
// rbp+0: [ saved sender fp ] (for original sender of AMH)
// rbp+1: [ saved sender pc ] (back to original sender of AMH)
// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender)
// rbp+3: [ transformed adapter arg M-1]
// ...
// rbp+M+1: [ transformed adapter arg 1 ]
// rbp+M+2: [ padding ] <-- (rbp + saved args base offset)
// ... [ optional padding]
// (higher memory addresses...)
//
// The arguments originally passed by the original sender
// are lost, and arbitrary amounts of stack motion might have
// happened due to argument transformation.
// (This is done by C2I/I2C adapters and non-direct method handles.)
// This is why there is an unpredictable amount of memory between
// the extended and exact TOS of the sender.
// The ricochet adapter itself will also (in general) perform
// transformations before the recursive call.
//
// The transformed and saved arguments, immediately above the saved
// return PC, are a well-formed method handle invocation ready to execute.
// When the GC needs to walk the stack, these arguments are described
// via the saved arg types oop, an int[] array with a private format.
// This array is derived from the type of the transformed adapter
// method handle, which also sits at the base of the saved argument
// bundle. Since the GC may not be able to fish out the int[]
// array, so it is pushed explicitly on the stack. This may be
// an unnecessary expense.
//
// The following register conventions are significant at this point:
// rsp the thread stack, as always; preserved by caller
// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
// rcx recursive method handle (contents of [rsp+N+1])
// rbp preserved by caller (not used by caller)
// Unless otherwise specified, all registers can be blown by the call.
//
// If this frame must be walked, the transformed adapter arguments
// will be found with the help of the saved arguments descriptor.
//
// Therefore, the descriptor must match the referenced arguments.
// The arguments must be followed by at least one word of padding,
// which will be necessary to complete the final method handle call.
// That word is not treated as holding an oop. Neither is the word
//
// The word pointed to by the return argument pointer is not
// treated as an oop, even if points to a saved argument.
// This allows the saved argument list to have a "hole" in it
// to receive an oop from the recursive call.
// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
//
// When the recursive callee returns, RicochetBlob::bounce_addr will
// immediately jump to the continuation stored in the RF.
// This continuation will merge the recursive return value
// into the saved argument list. At that point, the original
// rsi, rbp, and rsp will be reloaded, the ricochet frame will
// disappear, and the final target of the adapter method handle
// will be invoked on the transformed argument list.
class RicochetFrame {
friend class MethodHandles;
friend class VMStructs;
private:
intptr_t* _continuation; // what to do when control gets back here
oopDesc* _saved_target; // target method handle to invoke on saved_args
oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3)
intptr_t _conversion; // misc. information from original AdapterMethodHandle (-2)
intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1)
intptr_t* _sender_link; // *must* coincide with frame::link_offset (0)
address _sender_pc; // *must* coincide with frame::return_addr_offset (1)
public:
intptr_t* continuation() const { return _continuation; }
oop saved_target() const { return _saved_target; }
oop saved_args_layout() const { return _saved_args_layout; }
intptr_t* saved_args_base() const { return _saved_args_base; }
intptr_t conversion() const { return _conversion; }
intptr_t* exact_sender_sp() const { return _exact_sender_sp; }
intptr_t* sender_link() const { return _sender_link; }
address sender_pc() const { return _sender_pc; }
intptr_t* extended_sender_sp() const {
// The extended sender SP is above the current RicochetFrame.
return (intptr_t*) (((address) this) + sizeof(RicochetFrame));
}
intptr_t return_value_slot_number() const {
return adapter_conversion_vminfo(conversion());
}
BasicType return_value_type() const {
return adapter_conversion_dest_type(conversion());
}
bool has_return_value_slot() const {
return return_value_type() != T_VOID;
}
intptr_t* return_value_slot_addr() const {
assert(has_return_value_slot(), "");
return saved_arg_slot_addr(return_value_slot_number());
}
intptr_t* saved_target_slot_addr() const {
return saved_arg_slot_addr(saved_args_length());
}
intptr_t* saved_arg_slot_addr(int slot) const {
assert(slot >= 0, "");
return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
}
jint saved_args_length() const;
jint saved_arg_offset(int arg) const;
// GC interface
oop* saved_target_addr() { return (oop*)&_saved_target; }
oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; }
oop compute_saved_args_layout(bool read_cache, bool write_cache);
// Compiler/assembler interface.
static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); }
static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); }
static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); }
static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); }
static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); }
static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); }
static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); }
static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); }
// This value is not used for much, but it apparently must be nonzero.
static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); }
#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
static int magic_number_1_offset_in_bytes() { return -wordSize; }
static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); }
intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); };
intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); };
#endif //ASSERT
enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
static void verify_offsets() NOT_DEBUG_RETURN;
void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
void zap_arguments() NOT_DEBUG_RETURN;
static void generate_ricochet_blob(MacroAssembler* _masm,
// output params:
int* bounce_offset,
int* exception_offset,
int* frame_size_in_words);
static void enter_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register rax_argv,
address return_handler,
Register rbx_temp);
static void leave_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv,
Register new_sp_reg,
Register sender_pc_reg);
static Address frame_address(int offset = 0) {
// The RicochetFrame is found by subtracting a constant offset from rbp.
return Address(rbp, - sender_link_offset_in_bytes() + offset);
}
static RicochetFrame* from_frame(const frame& fr) {
address bp = (address) fr.fp();
RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
rf->verify();
return rf;
}
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
};

// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
static void load_stack_move(MacroAssembler* _masm,
Register rdi_stack_move,
Register rcx_amh,
bool might_be_negative);
static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);
static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register rax_argslot,
Register rbx_temp, Register rdx_temp);
static void push_arg_slots(MacroAssembler* _masm,
Register rax_argslot,
RegisterOrConstant slot_count,
int skip_words_count,
Register rbx_temp, Register rdx_temp);
static void move_arg_slots_up(MacroAssembler* _masm,
Register rbx_bottom, // invariant
Address top_addr, // can use rax_temp
RegisterOrConstant positive_distance_in_slots,
Register rax_temp, Register rdx_temp);
static void move_arg_slots_down(MacroAssembler* _masm,
Address bottom_addr, // can use rax_temp
Register rbx_top, // invariant
RegisterOrConstant negative_distance_in_slots,
Register rax_temp, Register rdx_temp);
static void move_typed_arg(MacroAssembler* _masm,
BasicType type, bool is_element,
Address slot_dest, Address value_src,
Register rbx_temp, Register rdx_temp);
static void move_return_value(MacroAssembler* _masm, BasicType type,
Address return_slot);
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_argslots(MacroAssembler* _masm,
RegisterOrConstant argslot_count,
Register argslot_reg,
bool negate_argslot,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_stack_move(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int direction) NOT_DEBUG_RETURN;
static void verify_klass(MacroAssembler* _masm,
                         Register obj, KlassHandle klass,
@@ -297,9 +43,17 @@ public:
               "reference is a MH");
}
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry);
static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2,
bool for_compiler_entry);
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;

View file

@@ -643,6 +643,19 @@ static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_
__ movdbl(r, Address(saved_sp, next_val_off));
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
@@ -653,9 +666,53 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// we may do a i2c -> c2i transition if we lose a race where compiled
// code goes non-entrant while we get args ready.
// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
// Pick up the return address
__ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, rdi,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}
// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
__ movptr(rdi, rsp);
@@ -1293,6 +1350,89 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
__ bind(done);
}
static void verify_oop_args(MacroAssembler* masm,
int total_args_passed,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
static void gen_special_dispatch(MacroAssembler* masm,
int total_args_passed,
int comp_args_on_stack,
vmIntrinsics::ID special_dispatch,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, total_args_passed, sig_bt, regs);
// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (special_dispatch == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
}
if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}
if (has_receiver) {
// Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = rcx; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
@@ -1323,14 +1463,37 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
// transition back to thread_in_Java
// return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                int total_in_args,
                                                int comp_args_on_stack,
                                                BasicType *in_sig_bt,
                                                BasicType* in_sig_bt,
                                                VMRegPair *in_regs,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
total_in_args,
comp_args_on_stack,
method->intrinsic_id(),
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
@@ -1436,7 +1599,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
if (in_regs[i].first()->is_Register()) {
  const Register reg = in_regs[i].first()->as_Register();
  switch (in_sig_bt[i]) {
    case T_ARRAY:
    case T_ARRAY:  // critical array (uses 2 slots on LP64)
    case T_BOOLEAN:
    case T_BYTE:
    case T_SHORT:

View file

@@ -590,6 +590,19 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ jmp(rcx);
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
@@ -605,9 +618,53 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// save code can segv when fxsave instructions find improperly
// aligned stack pointer.
// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
// Pick up the return address
__ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
// So, let's test for cascading c2i/i2c adapters right now.
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, r11,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, r11,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}
// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
__ movptr(r11, rsp);
@@ -1366,6 +1423,14 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
}
// Different signatures may require very different orders for the move
// to avoid clobbering other arguments. There's no simple way to
// order them safely. Compute a safe order for issuing stores and
// break any cycles in those stores. This code is fairly general but
// it's not necessary on the other platforms so we keep it in the
// platform dependent code instead of moving it into a shared file.
// (See bugs 7013347 & 7145024.)
// Note that this code is specific to LP64.
class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
@@ -1532,6 +1597,89 @@ class ComputeMoveOrder: public StackObj {
  }
};
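The comment introducing ComputeMoveOrder describes the classic parallel-move problem: emit a move only once its destination is no longer a pending source, and when only cycles remain, break one by parking a source in a temp. A toy scheduler showing that idea (an illustration, not the HotSpot algorithm; the register numbers and single TEMP slot are invented):

#include <cstdio>
#include <vector>

struct Move { int dst, src; };

// Emit "dst <- src" moves so no pending source is clobbered before it is
// read; when every destination still feeds another pending move, we are in
// a cycle and break it by parking one source in a scratch location.
static void schedule(std::vector<Move> pending) {
  const int TEMP = -1;
  while (!pending.empty()) {
    bool progress = false;
    for (size_t i = 0; i < pending.size(); ++i) {
      bool dst_still_needed = false;
      for (size_t j = 0; j < pending.size(); ++j)
        if (j != i && pending[j].src == pending[i].dst) { dst_still_needed = true; break; }
      if (!dst_still_needed) {
        if (pending[i].src == TEMP) printf("mov r%d <- temp\n", pending[i].dst);
        else                        printf("mov r%d <- r%d\n", pending[i].dst, pending[i].src);
        pending.erase(pending.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {                    // only cycles remain
      int parked = pending.back().src;  // break one: save its source first
      printf("mov temp <- r%d\n", parked);
      for (size_t j = 0; j < pending.size(); ++j)
        if (pending[j].src == parked) pending[j].src = TEMP;
    }
  }
}

int main() {
  // Parallel intent: r4 <- r1, plus the cycle r1 <- r2, r2 <- r3, r3 <- r1.
  schedule({ {4, 1}, {1, 2}, {2, 3}, {3, 1} });
  return 0;
}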
static void verify_oop_args(MacroAssembler* masm,
int total_args_passed,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
static void gen_special_dispatch(MacroAssembler* masm,
int total_args_passed,
int comp_args_on_stack,
vmIntrinsics::ID special_dispatch,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, total_args_passed, sig_bt, regs);
// Now write the args into the outgoing interpreter space
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (special_dispatch == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
}
if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}
if (has_receiver) {
// Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = j_rarg0; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
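
The argument-selection logic of gen_special_dispatch can be summarized in plain C++. The sketch below uses invented enum values; only the shape of the decision (trailing MemberName for linkTo* intrinsics, receiver for invokeBasic, odd reference kinds implying a receiver) mirrors the code above.

    #include <cassert>
    #include <cstdio>

    enum Intrinsic { InvokeBasic, LinkToVirtual, LinkToStatic };

    // Stand-in for signature_polymorphic_intrinsic_ref_kind(): nonzero
    // means "takes a trailing MemberName".  5/6 follow the JVMS numbering.
    static int ref_kind(Intrinsic id) {
      switch (id) {
        case LinkToVirtual: return 5;  // REF_invokeVirtual
        case LinkToStatic:  return 6;  // REF_invokeStatic
        default:            return 0;
      }
    }

    static void plan_dispatch(Intrinsic id, int total_args) {
      bool has_receiver = false;
      int member_arg_pos = -1;
      if (ref_kind(id) != 0) {
        member_arg_pos = total_args - 1;         // trailing MemberName
        has_receiver = (ref_kind(id) & 1) != 0;  // odd kinds take a receiver
      } else if (id == InvokeBasic) {
        has_receiver = true;                     // MH receiver in arg 0
      } else {
        assert(false && "unexpected intrinsic");
      }
      std::printf("has_receiver=%d member_arg_pos=%d\n",
                  (int)has_receiver, member_arg_pos);
    }

    int main() {
      plan_dispatch(InvokeBasic,  3);  // has_receiver=1 member_arg_pos=-1
      plan_dispatch(LinkToStatic, 4);  // has_receiver=0 member_arg_pos=3
    }
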
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
@@ -1539,14 +1687,60 @@ class ComputeMoveOrder: public StackObj {
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical lock
//    if a GC was suppressed while in the critical native.
//    transition back to thread_in_Java
//    return to caller
//
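
For context, this is what the critical-array contract looks like from an ordinary JNI native, which is the pattern critical natives make implicit. A hedged sketch (class and method names are hypothetical; the JNI calls themselves are the standard API and compile against jni.h):

    #include <jni.h>

    extern "C" JNIEXPORT jint JNICALL
    Java_Demo_sum(JNIEnv* env, jclass, jintArray arr) {
      jint len = env->GetArrayLength(arr);
      // Pin (or copy) the array; between Get and Release no other JNI
      // calls may be made.
      jint* p = (jint*) env->GetPrimitiveArrayCritical(arr, NULL);
      if (p == NULL) return 0;  // allocation failure
      jint s = 0;
      for (jint i = 0; i < len; i++) s += p[i];
      // JNI_ABORT: release without copying back (we only read).
      env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);
      return s;
    }
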
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                int total_in_args,
                                                int comp_args_on_stack,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
total_in_args,
comp_args_on_stack,
method->intrinsic_id(),
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
@@ -1658,7 +1852,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
        case T_SHORT:
        case T_CHAR:
        case T_INT:  single_slots++; break;
        case T_ARRAY:  // specific to LP64 (7145024)
        case T_LONG:   double_slots++; break;
        default:  ShouldNotReachHere();
      }

View file

@@ -2326,12 +2326,6 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
                                                      CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
// Build this early so it's available for the interpreter
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);
    // Build this early so it's available for the interpreter
    StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  }

View file

@@ -3102,14 +3102,6 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
// Build this early so it's available for the interpreter. Stub
// expects the required and actual types as register arguments in
// j_rarg0 and j_rarg1 respectively.
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);
    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",

View file

@@ -710,9 +710,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
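
The shift-without-mask pattern above works because the TosState field sits in the top bits of the flags word. A small sketch of what verify_tos_state_shift() promises, with illustrative constants only (not a statement of HotSpot's real layout guarantees):

    #include <cstdint>
    #include <cstdio>

    const int      tos_state_shift = 28;   // assumed: top 4 bits of 32
    const uint32_t tos_state_mask  = 0x0F;

    static uint32_t tos_state(uint32_t flags) {
      // verify_tos_state_shift() asserts the equivalent of:
      //   (flags >> tos_state_shift)
      //     == ((flags >> tos_state_shift) & tos_state_mask)
      return flags >> tos_state_shift;
    }

    int main() {
      uint32_t flags = (7u << tos_state_shift) | 0x123;  // state 7 + low bits
      std::printf("state=%u\n", tos_state(flags));       // prints 7
    }
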
@@ -1513,7 +1513,6 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();       break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();    break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();    break;
case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
@@ -1526,7 +1525,9 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);    break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
    default:
      fatal(err_msg("unexpected method kind: %d", kind));
      break;
  }

  if (entry_point) return entry_point;

View file

@@ -683,9 +683,9 @@ address InterpreterGenerator::generate_accessor_entry(void) {
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask edx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
@@ -1524,12 +1524,11 @@ address AbstractInterpreterGenerator::generate_method_entry(
  switch (kind) {
  case Interpreter::zerolocals             :                                                                              break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break;
  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
@@ -1539,10 +1538,12 @@ address AbstractInterpreterGenerator::generate_method_entry(
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  default:
    fatal(err_msg("unexpected method kind: %d", kind));
    break;
  }

  if (entry_point) {

View file

@@ -446,13 +446,13 @@ void TemplateTable::fast_aldc(bool wide) {
    const Register cache = rcx;
    const Register index = rdx;

    resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
    if (VerifyOops) {
      __ verify_oop(rax);
    }

    Label L_done, L_throw_exception;
    const Register con_klass_temp = rcx;  // same as cache
    __ load_klass(con_klass_temp, rax);
    __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
    __ jcc(Assembler::notEqual, L_done);
@@ -2084,15 +2084,15 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  const Register temp = rbx;
  assert_different_registers(result, Rcache, index, temp);

  Label resolved;
  if (byte_no == f12_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    // The caller will also load a methodOop from f2.
    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
@@ -2112,15 +2112,18 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);       break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);        break;
    case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);  break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);           break;
    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);           break;
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
      break;
  }
  __ movl(temp, (int)bytecode());
  __ call_VM(noreg, entry, temp);
@@ -2149,7 +2152,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
  __ movl(flags, Address(cache, index, Address::times_ptr,
           in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_ptr,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
@@ -2161,7 +2164,7 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, /*unused*/
                                               bool is_invokedynamic) {
  // setup registers
  const Register cache = rcx;
@@ -2171,28 +2174,33 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
    constantPoolCacheOopDesc::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (byte_no == f12_oop) {
    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
    __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
    itable_index = noreg;  // hack to disable load below
  } else {
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
  }
  if (itable_index != noreg) {
    // pick up itable index from f2 also:
    assert(byte_no == f1_byte, "already picked up f1");
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
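
A rough data-structure model of what this routine reads may help. The struct below is invented for illustration and much simpler than the real ConstantPoolCacheEntry, but it shows the f12_oop convention (appendix oop in f1, invoker method in f2):

    #include <cstdio>

    struct CPCacheEntry {
      void*    f1;     // klass, methodOop, CallSite, MethodType, ...
      void*    f2;     // methodOop or vtable index, depending on bytecode
      unsigned flags;  // tos state, parameter size, is_vfinal, ...
    };

    // Mirrors the f12_oop path above: f1 supplies the appendix oop that
    // lands in 'itable_index', f2 supplies the invoker method.
    static void load_f12(const CPCacheEntry& e, void** appendix, void** method) {
      *appendix = e.f1;
      *method   = e.f2;
    }

    int main() {
      int call_site = 0, invoker = 0;  // stand-ins for real oops
      CPCacheEntry e = { &call_site, &invoker, 0u };
      void *a, *m;
      load_f12(e, &a, &m);
      std::printf("appendix=%p method=%p\n", a, m);
    }
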
@@ -2260,10 +2268,10 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {

  Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  assert(btos == 0, "change code, btos != 0");
  // btos
  __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  __ load_signed_byte(rax, lo );
@@ -2415,9 +2423,9 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
    __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
                                   ConstantPoolCacheEntry::flags_offset())));
    __ mov(rbx, rsp);
    __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rcx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rcx, ltos);
    __ jccb(Assembler::equal, two_word);
    __ cmpl(rcx, dtos);
@@ -2467,7 +2475,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label notVolatile, Done;
  __ movl(rdx, flags);
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // field addresses
@@ -2476,9 +2484,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {

  Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  assert(btos == 0, "change code, btos != 0");
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  // btos
@@ -2719,7 +2727,7 @@ void TemplateTable::fast_storefield(TosState state) {
  //                                              volatile_barrier( );

  Label notVolatile, Done;
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);
  // Check for volatile store
  __ testl(rdx, rdx);
@@ -2885,19 +2893,29 @@ void TemplateTable::count_calls(Register method, Register temp) {
}
void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv  != noreg);
  const bool save_flags         = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv  == noreg || recv  == rcx, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = rcx;
  if (flags == noreg)  flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
@@ -2905,20 +2923,28 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ verify_oop(index);
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jccb(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (note: no return address pushed yet)
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }
  if (save_flags) {
@@ -2926,16 +2952,14 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
  }
  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
        (address)Interpreter::return_5_addrs_by_index_table() :
        (address)Interpreter::return_3_addrs_by_index_table();
    ExternalAddress table(table_addr);
    __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
  }
@@ -2943,7 +2967,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
  // push return address
  __ push(flags);

  // Restore flags value from the constant pool cache, and restore rsi
  // for later null checks.  rsi is the bytecode pointer
  if (save_flags) {
    __ mov(flags, rsi);
  }
}
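
The return-address lookup at the end of prepare_invoke is just a table indexed by TosState, with the 5-byte invokes (invokeinterface, invokedynamic) using their own table. A toy model with fabricated table contents; only the selection logic follows the code above:

    #include <cstdio>

    typedef void (*entry_t)();
    static void ret_b() { std::puts("btos return entry"); }
    static void ret_a() { std::puts("atos return entry"); }

    int main() {
      enum { number_of_states = 2 };            // the real table is larger
      entry_t return_3[number_of_states] = { ret_b, ret_a };
      entry_t return_5[number_of_states] = { ret_b, ret_a };

      bool is_invokeinterface = false, is_invokedynamic = true;
      entry_t* table = (is_invokeinterface || is_invokedynamic)
                     ? return_5 : return_3;     // 5-byte bytecodes
      unsigned tos_state = 1;                   // decoded from flags
      table[tos_state]();                       // the "return address" chosen
    }
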
void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv  == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "methodOop must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
  __ verify_oop(method);
  // It's final, need a null check here!
@@ -2982,7 +3010,6 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
// Keep recv in rcx for callee expects it there
  __ load_klass(rax, recv);
  __ verify_oop(rax);

@@ -2990,9 +3017,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
  __ profile_virtual_call(rax, rdi, rdx);

  // get target methodOop & entry point
  __ lookup_virtual_method(rax, index, method);
  __ jump_from_interpreted(method, rdx);
}
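
What lookup_virtual_method condenses is ordinary vtable indexing. A hedged sketch with invented types, not HotSpot's klass layout:

    #include <cstdio>

    struct Method { const char* name; };
    struct Klass  { Method* vtable[4]; };

    // Replaces the open-coded base + index * entry_size address
    // arithmetic that the old sequence emitted.
    static Method* lookup_virtual_method(Klass* recv_klass, int vtable_index) {
      return recv_klass->vtable[vtable_index];
    }

    int main() {
      Method m1 = {"Base.toString"}, m2 = {"Sub.toString"};
      Klass base = {{ &m1 }}, sub = {{ &m2 }};
      int index = 0;  // the vtable index from the CP cache f2 field
      std::printf("%s\n", lookup_virtual_method(&base, index)->name);
      std::printf("%s\n", lookup_virtual_method(&sub,  index)->name);
    }
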
@@ -3000,9 +3025,12 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,    // method or vtable index
                 noreg,  // unused itable index
                 rcx, rdx);  // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags
@@ -3013,7 +3041,10 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 methodOop
                 rcx);  // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ verify_oop(rbx);
  __ profile_call(rax);
@@ -3024,7 +3055,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 methodOop
  // do the call
  __ verify_oop(rbx);
  __ profile_call(rax);
@@ -3042,10 +3073,11 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 klassOop, f2 itable index
                 rcx, rdx);  // recv, flags

  // rax: interface klass (from f1)
  // rbx: itable index (from f2)
  // rcx: receiver
  // rdx: flags
@@ -3055,7 +3087,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  // another compliant java compiler.
  Label notMethod;
  __ movl(rdi, rdx);
  __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  __ jcc(Assembler::zero, notMethod);

  invokevirtual_helper(rbx, rcx, rdx);
@@ -3063,6 +3095,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore rdi
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx);
  __ verify_oop(rdx);
@@ -3077,7 +3110,7 @@ void TemplateTable::invokeinterface(int byte_no) {
                             rbx, rsi,
                             no_such_interface);

  // rbx: methodOop to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3116,9 +3149,39 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ should_not_reach_here();
}
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f12_oop, "use this argument");
const Register rbx_method = rbx; // (from f2)
const Register rax_mtype = rax; // (from f1)
const Register rcx_recv = rcx;
const Register rdx_flags = rdx;
if (!EnableInvokeDynamic) {
// rewriter does not generate this bytecode
__ should_not_reach_here();
return;
}
prepare_invoke(byte_no,
rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
rcx_recv);
__ verify_oop(rbx_method);
__ verify_oop(rcx_recv);
__ null_check(rcx_recv);
// Note: rax_mtype is already pushed (if necessary) by prepare_invoke
// FIXME: profile the LambdaForm also
__ profile_final_call(rax);
__ jump_from_interpreted(rbx_method, rdx);
}
void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f12_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3131,26 +3194,23 @@ void TemplateTable::invokedynamic(int byte_no) {
    return;
  }

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from f1)
  // rbx: MH.linkToCallSite method (from f2)

  // Note: rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rsi);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}
//----------------------------------------------------------------------------------------------------

View file

@@ -25,10 +25,15 @@
#ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
#define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP

  static void prepare_invoke(int byte_no,
                             Register method,         // linked method (or i-klass)
                             Register index = noreg,  // itable index, MethodType, etc.
                             Register recv  = noreg,  // if caller wants to see it
                             Register flags = noreg   // if caller wants to test it
                             );
  static void invokevirtual_helper(Register index, Register recv,
                                   Register flags);
  static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);

  // Helpers
  static void index_check(Register array, Register index);

View file

@@ -458,7 +458,7 @@ void TemplateTable::fast_aldc(bool wide) {
  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }
@@ -2125,10 +2125,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
  assert_different_registers(result, Rcache, index, temp);

  Label resolved;
  if (byte_no == f12_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    // The caller will also load a methodOop from f2.
    assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
@@ -2157,6 +2158,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
  case Bytecodes::_invokeinterface:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
    break;
  case Bytecodes::_invokehandle:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
    break;
  case Bytecodes::_invokedynamic:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
    break;
@@ -2167,7 +2171,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
    break;
  default:
    fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
    break;
  }
  __ movl(temp, (int) bytecode());
@@ -2180,7 +2184,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
  __ bind(resolved);
}

// The cache and index registers must be set before call
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
@@ -2191,17 +2195,17 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  // Field offset
  __ movptr(off, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::f2_offset())));
  // Flags
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_ptr,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f1_offset())));
  }
@@ -2222,9 +2226,10 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
    constantPoolCacheOopDesc::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
@@ -2233,15 +2238,21 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (byte_no == f12_oop) {
    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
    __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
    itable_index = noreg;  // hack to disable load below
  } else {
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
  }
  if (itable_index != noreg) {
    // pick up itable index from f2 also:
    assert(byte_no == f1_byte, "already picked up f1");
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
@@ -2317,10 +2328,11 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  Label Done, notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask edx after the above shift
  assert(btos == 0, "change code, btos != 0");

  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);
  // btos
  __ load_signed_byte(rax, field);
@@ -2466,10 +2478,9 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
                                 Address::times_8,
                                 in_bytes(cp_base_offset +
                                          ConstantPoolCacheEntry::flags_offset())));
    __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rcx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
    __ cmpl(c_rarg3, ltos);
    __ cmovptr(Assembler::equal,
@@ -2516,7 +2527,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label notVolatile, Done;
  __ movl(rdx, flags);
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // field address
@@ -2525,10 +2536,10 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  Label notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  assert(btos == 0, "change code, btos != 0");

  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  // btos
@@ -2751,7 +2762,7 @@ void TemplateTable::fast_storefield(TosState state) {
  //                                              Assembler::StoreStore));

  Label notVolatile;
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // Get object from stack
@@ -2832,7 +2843,7 @@ void TemplateTable::fast_accessfield(TosState state) {
  //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ andl(rdx, 0x1);
  // }
  __ movptr(rbx, Address(rcx, rbx, Address::times_8,
@@ -2920,7 +2931,7 @@ void TemplateTable::fast_xaccess(TosState state) {
  //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ testl(rdx, 0x1);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
@@ -2940,19 +2951,29 @@ void TemplateTable::count_calls(Register method, Register temp) {
  ShouldNotReachHere();
}
void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv  != noreg);
  const bool save_flags         = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv  == noreg || recv  == rcx, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = rcx;
  if (flags == noreg)  flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
@@ -2960,19 +2981,29 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ verify_oop(index);
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jccb(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }
  if (save_flags) {
@@ -2980,16 +3011,14 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
  }
  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
        (address)Interpreter::return_5_addrs_by_index_table() :
        (address)Interpreter::return_3_addrs_by_index_table();
    ExternalAddress table(table_addr);
    __ lea(rscratch1, table);
    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
  }
@ -2998,7 +3027,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// push return address // push return address
__ push(flags); __ push(flags);
// Restore flag field from the constant pool cache, and restore esi // Restore flags value from the constant pool cache, and restore rsi
// for later null checks. r13 is the bytecode pointer // for later null checks. r13 is the bytecode pointer
if (save_flags) { if (save_flags) {
__ movl(flags, r13); __ movl(flags, r13);
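After the shift, flags holds only the TOS state, which indexes the chosen return-entry table directly; no masking is needed because the state occupies the top bits, which is what verify_tos_state_shift() checks. A toy model of the lookup, with an invented table size and stub entries:

#include <array>
#include <cstdint>

using entry_t = void (*)();
static void return_entry_stub() {}  // placeholder interpreter return entry

// Hypothetical shift; the real value is ConstantPoolCacheEntry::tos_state_shift.
static const int kTosStateShift = 28;

static std::array<entry_t, 16> make_table() {
  std::array<entry_t, 16> t{};
  t.fill(return_entry_stub);
  return t;
}
static std::array<entry_t, 16> return_table = make_table();

entry_t return_entry_for(uint32_t flags) {
  // Shifting leaves the TOS state at bit 0; it indexes the table directly.
  return return_table[flags >> kTosStateShift];
}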
@@ -3012,11 +3041,13 @@ void TemplateTable::invokevirtual_helper(Register index,
                                          Register flags) {
   // Uses temporary registers rax, rdx
   assert_different_registers(index, recv, rax, rdx);
+  assert(index == rbx, "");
+  assert(recv  == rcx, "");

   // Test for an invoke of a final method
   Label notFinal;
   __ movl(rax, flags);
-  __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
+  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
   __ jcc(Assembler::zero, notFinal);

   const Register method = index;  // method must be rbx

@@ -3024,6 +3055,7 @@ void TemplateTable::invokevirtual_helper(Register index,
          "methodOop must be rbx for interpreter calling convention");

   // do the call - the index is actually the method to call
+  // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
   __ verify_oop(method);

   // It's final, need a null check here!
@@ -3039,20 +3071,13 @@ void TemplateTable::invokevirtual_helper(Register index,
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
   __ load_klass(rax, recv);
   __ verify_oop(rax);

   // profile this call
   __ profile_virtual_call(rax, r14, rdx);

   // get target methodOop & entry point
-  const int base = instanceKlass::vtable_start_offset() * wordSize;
-  assert(vtableEntry::size() * wordSize == 8,
-         "adjust the scaling in the code below");
-  __ movptr(method, Address(rax, index,
-                            Address::times_8,
-                            base + vtableEntry::method_offset_in_bytes()));
-  __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
+  __ lookup_virtual_method(rax, index, method);
   __ jump_from_interpreted(method, rdx);
 }
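The removed lines spell out the vtable arithmetic that lookup_virtual_method now presumably performs in one place: entry address = klass + vtable start + index * entry size, then read the methodOop field. A sketch of that offset computation with placeholder layout constants (the real ones come from instanceKlass and vtableEntry):

#include <cstdint>

// Placeholder constants standing in for instanceKlass::vtable_start_offset(),
// vtableEntry::size() and vtableEntry::method_offset_in_bytes().
static const int kWordSize         = 8;   // x86_64
static const int kVtableStartWords = 43;  // hypothetical klass header size, in words
static const int kEntryWords       = 1;   // matches the removed assert: one word per entry
static const int kMethodOffset     = 0;   // methodOop field offset within an entry

// Byte offset, relative to the receiver's klass, of the methodOop for a given
// vtable index; this is the address the removed movptr() computed inline.
intptr_t vtable_method_offset(int vtable_index) {
  return (intptr_t)kVtableStartWords * kWordSize
       + (intptr_t)vtable_index * kEntryWords * kWordSize
       + kMethodOffset;
}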
@@ -3060,7 +3085,10 @@ void TemplateTable::invokevirtual_helper(Register index,
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f2_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
+  prepare_invoke(byte_no,
+                 rbx,        // method or vtable index
+                 noreg,      // unused itable index
+                 rcx, rdx);  // recv, flags

   // rbx: index
   // rcx: receiver
@@ -3073,7 +3101,10 @@ void TemplateTable::invokevirtual(int byte_no) {
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
+  prepare_invoke(byte_no, rbx, noreg,  // get f1 methodOop
+                 rcx);  // get receiver also for null check
+  __ verify_oop(rcx);
+  __ null_check(rcx);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -3084,7 +3115,7 @@ void TemplateTable::invokespecial(int byte_no) {
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rbx, noreg, byte_no);
+  prepare_invoke(byte_no, rbx);  // get f1 methodOop
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -3100,10 +3131,11 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  prepare_invoke(rax, rbx, byte_no);
+  prepare_invoke(byte_no, rax, rbx,  // get f1 klassOop, f2 itable index
+                 rcx, rdx);          // recv, flags

-  // rax: Interface
-  // rbx: index
+  // rax: interface klass (from f1)
+  // rbx: itable index (from f2)
   // rcx: receiver
   // rdx: flags

@@ -3113,14 +3145,15 @@ void TemplateTable::invokeinterface(int byte_no) {
   // another compliant java compiler.
   Label notMethod;
   __ movl(r14, rdx);
-  __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface));
+  __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
   __ jcc(Assembler::zero, notMethod);

   invokevirtual_helper(rbx, rcx, rdx);
   __ bind(notMethod);

   // Get receiver klass into rdx - also a null check
   __ restore_locals();  // restore r14
-  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
   __ load_klass(rdx, rcx);
   __ verify_oop(rdx);
@@ -3135,7 +3168,7 @@ void TemplateTable::invokeinterface(int byte_no) {
                              rbx, r13,
                              no_such_interface);

-  // rbx,: methodOop to call
+  // rbx: methodOop to call
   // rcx: receiver
   // Check for abstract method error
   // Note: This should be done more efficiently via a throw_abstract_method_error

@@ -3172,12 +3205,42 @@ void TemplateTable::invokeinterface(int byte_no) {
                    InterpreterRuntime::throw_IncompatibleClassChangeError));
   // the call_VM checks for exception, so we should never return here.
   __ should_not_reach_here();
+  return;
 }
+
+void TemplateTable::invokehandle(int byte_no) {
+  transition(vtos, vtos);
+  assert(byte_no == f12_oop, "use this argument");
+  const Register rbx_method = rbx;  // f2
+  const Register rax_mtype  = rax;  // f1
+  const Register rcx_recv   = rcx;
+  const Register rdx_flags  = rdx;
+
+  if (!EnableInvokeDynamic) {
+    // rewriter does not generate this bytecode
+    __ should_not_reach_here();
+    return;
+  }
+
+  prepare_invoke(byte_no,
+                 rbx_method, rax_mtype,  // get f2 methodOop, f1 MethodType
+                 rcx_recv);
+  __ verify_oop(rbx_method);
+  __ verify_oop(rcx_recv);
+  __ null_check(rcx_recv);
+
+  // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
+
+  // FIXME: profile the LambdaForm also
+  __ profile_final_call(rax);
+
+  __ jump_from_interpreted(rbx_method, rdx);
+}
 void TemplateTable::invokedynamic(int byte_no) {
   transition(vtos, vtos);
-  assert(byte_no == f1_oop, "use this argument");
+  assert(byte_no == f12_oop, "use this argument");

   if (!EnableInvokeDynamic) {
     // We should not encounter this bytecode if !EnableInvokeDynamic.

@@ -3190,26 +3253,23 @@ void TemplateTable::invokedynamic(int byte_no) {
     return;
   }

-  prepare_invoke(rax, rbx, byte_no);
-
-  // rax: CallSite object (f1)
-  // rbx: unused (f2)
-  // rcx: receiver address
-  // rdx: flags (unused)
-
-  Register rax_callsite      = rax;
-  Register rcx_method_handle = rcx;
+  const Register rbx_method   = rbx;
+  const Register rax_callsite = rax;
+
+  prepare_invoke(byte_no, rbx_method, rax_callsite);
+
+  // rax: CallSite object (from f1)
+  // rbx: MH.linkToCallSite method (from f2)
+
+  // Note: rax_callsite is already pushed by prepare_invoke

   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(r13);

   __ verify_oop(rax_callsite);
-  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
-  __ null_check(rcx_method_handle);
-  __ verify_oop(rcx_method_handle);
-  __ prepare_to_jump_from_interpreted();
-  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
+
+  __ jump_from_interpreted(rbx_method, rdx);
 }


@@ -25,7 +25,12 @@
 #ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
 #define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP

-  static void prepare_invoke(Register method, Register index, int byte_no);
+  static void prepare_invoke(int byte_no,
+                             Register method,         // linked method (or i-klass)
+                             Register index = noreg,  // itable index, MethodType, etc.
+                             Register recv  = noreg,  // if caller wants to see it
+                             Register flags = noreg   // if caller wants to test it
+                             );
   static void invokevirtual_helper(Register index, Register recv,
                                    Register flags);
   static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
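The defaulted trailing parameters let each invoke bytecode pass only the registers it cares about, as the .cpp hunks above show. A compilable sketch of the same defaulted-argument pattern, with stand-in Register and noreg definitions:

#include <cstdio>

// Stand-ins for HotSpot's Register type and the noreg sentinel.
struct Register { int id; };
static const Register noreg = { -1 };
static const Register rax = { 0 }, rcx = { 1 }, rdx = { 2 }, rbx = { 3 };

static void prepare_invoke(int byte_no,
                           Register method,           // linked method (or i-klass)
                           Register index = noreg,    // itable index, MethodType, etc.
                           Register recv  = noreg,    // if caller wants to see it
                           Register flags = noreg) {  // if caller wants to test it
  printf("byte_no=%d method=%d index=%d recv=%d flags=%d\n",
         byte_no, method.id, index.id, recv.id, flags.id);
}

int main() {
  prepare_invoke(1, rbx);                   // invokestatic: method only
  prepare_invoke(1, rbx, noreg, rcx);       // invokespecial: method + receiver
  prepare_invoke(2, rbx, noreg, rcx, rdx);  // invokevirtual: + flags
  prepare_invoke(1, rax, rbx, rcx, rdx);    // invokeinterface: klass + itable index
  return 0;
}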


@@ -76,8 +76,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   // get receiver klass
   address npe_addr = __ pc();
   __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));

-  // compute entry offset (in words)
-  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;

@@ -93,7 +92,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const Register method = rbx;

   // load methodOop and target address
-  __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
+  __ lookup_virtual_method(rax, vtable_index, method);
   if (DebugVtables) {
     Label L;
     __ cmpptr(method, (int32_t)NULL_WORD);


@@ -69,10 +69,6 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   address npe_addr = __ pc();
   __ load_klass(rax, j_rarg0);

-  // compute entry offset (in words)
-  int entry_offset =
-      instanceKlass::vtable_start_offset() + vtable_index * vtableEntry::size();
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;

@@ -90,9 +86,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   // load methodOop and target address
   const Register method = rbx;
-  __ movptr(method, Address(rax,
-                            entry_offset * wordSize +
-                            vtableEntry::method_offset_in_bytes()));
+  __ lookup_virtual_method(rax, vtable_index, method);

   if (DebugVtables) {
     Label L;
     __ cmpptr(method, (int32_t)NULL_WORD);

File diff suppressed because it is too large.


@@ -1367,22 +1367,6 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
   return offset;
 }

-const bool Matcher::match_rule_supported(int opcode) {
-  if (!has_match_rule(opcode))
-    return false;
-
-  switch (opcode) {
-    case Op_PopCountI:
-    case Op_PopCountL:
-      if (!UsePopCountInstruction)
-        return false;
-    break;
-  }
-
-  return true;  // Per default match rules are supported.
-}
-
 int Matcher::regnum_to_fpu_offset(int regnum) {
   return regnum - 32; // The FP registers are in the second chunk
 }


@@ -1513,22 +1513,6 @@ int emit_deopt_handler(CodeBuffer& cbuf)
   return offset;
 }

-const bool Matcher::match_rule_supported(int opcode) {
-  if (!has_match_rule(opcode))
-    return false;
-
-  switch (opcode) {
-    case Op_PopCountI:
-    case Op_PopCountL:
-      if (!UsePopCountInstruction)
-        return false;
-    break;
-  }
-
-  return true;  // Per default match rules are supported.
-}
-
 int Matcher::regnum_to_fpu_offset(int regnum)
 {
   return regnum - 32; // The FP registers are in the second chunk
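Both platform copies of match_rule_supported are deleted with no replacement visible in this view; presumably the predicate was consolidated into shared code (plausibly inside the large suppressed file above). A sketch of that consolidated version, reproducing the logic of the removed lines with the ADLC-generated has_match_rule and the VM flag stubbed out:

// Stubs standing in for ADLC-generated code and a VM flag.
enum { Op_PopCountI = 1, Op_PopCountL = 2 };
static bool UsePopCountInstruction = true;                   // set from CPUID on x86
static bool has_match_rule(int /*opcode*/) { return true; }  // ADLC-generated

// A rule is supported unless it is feature-gated and the feature is off.
bool match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;
  switch (opcode) {
  case Op_PopCountI:
  case Op_PopCountL:
    if (!UsePopCountInstruction)
      return false;
    break;
  }
  return true;  // Per default match rules are supported.
}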
@@ -6427,6 +6411,31 @@ instruct castP2X(rRegL dst, rRegP src)
   ins_pipe(ialu_reg_reg); // XXX
 %}

+// Convert oop into int for vectors alignment masking
+instruct convP2I(rRegI dst, rRegP src)
+%{
+  match(Set dst (ConvL2I (CastP2X src)));
+
+  format %{ "movl $dst, $src\t# ptr -> int" %}
+  ins_encode %{
+    __ movl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // XXX
+%}
+
+// Convert compressed oop into int for vectors alignment masking
+// in case of 32bit oops (heap < 4Gb).
+instruct convN2I(rRegI dst, rRegN src)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst (ConvL2I (CastP2X (DecodeN src))));
+
+  format %{ "movl $dst, $src\t# compressed ptr -> int" %}
+  ins_encode %{
+    __ movl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // XXX
+%}
+
 // Convert oop pointer into compressed form
 instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
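convN2I is only legal when the compressed-oop shift is zero, i.e. when the heap fits below 4Gb and narrow oops are stored unscaled with a zero base; only then do the low 32 bits of the decoded pointer equal the narrow value, so a plain movl suffices. A sketch of the decode arithmetic behind the predicate (base and shift values are illustrative):

#include <cstdint>

// Compressed-oop decode: oop = heap_base + ((uint64_t)narrow << shift).
// With a heap below 4Gb HotSpot can use base == 0 and shift == 0.
static uint64_t heap_base = 0;  // illustrative; Universe::narrow_oop_base()
static int      oop_shift = 0;  // illustrative; Universe::narrow_oop_shift()

uint64_t decode(uint32_t narrow) {
  return heap_base + ((uint64_t)narrow << oop_shift);
}

// When oop_shift == 0 (and base == 0), this is just `narrow`, which is why
// (ConvL2I (CastP2X (DecodeN src))) can collapse to a single movl.
uint32_t low_bits_for_alignment_mask(uint32_t narrow) {
  return (uint32_t)decode(narrow);
}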
@@ -10049,11 +10058,10 @@ instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
   ins_pipe( pipe_slow );
 %}

-// The next instructions have long latency and use Int unit. Set high cost.
 instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
   match(Set dst (MoveI2F src));
   effect(DEF dst, USE src);
-  ins_cost(300);
+  ins_cost(100);
   format %{ "movd $dst,$src\t# MoveI2F" %}
   ins_encode %{
     __ movdl($dst$$XMMRegister, $src$$Register);

@@ -10064,7 +10072,7 @@ instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
 instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
   match(Set dst (MoveL2D src));
   effect(DEF dst, USE src);
-  ins_cost(300);
+  ins_cost(100);
   format %{ "movd $dst,$src\t# MoveL2D" %}
   ins_encode %{
     __ movdq($dst$$XMMRegister, $src$$Register);


@@ -646,16 +646,15 @@ int CppInterpreter::method_handle_entry(methodOop method,
   oop method_type = (oop) p;

   // The MethodHandle is in the slot after the arguments
-  oop form = java_lang_invoke_MethodType::form(method_type);
-  int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form);
-  assert(argument_slots == num_vmslots + 1, "should be");
+  int num_vmslots = argument_slots - 1;
   oop method_handle = VMSLOTS_OBJECT(num_vmslots);

   // InvokeGeneric requires some extra shuffling
   oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
   bool is_exact = mhtype == method_type;
   if (!is_exact) {
-    if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
+    if (true || // FIXME
+        method->intrinsic_id() == vmIntrinsics::_invokeExact) {
       CALL_VM_NOCHECK_NOFIX(
         SharedRuntime::throw_WrongMethodTypeException(
           thread, method_type, mhtype));
@@ -670,8 +669,8 @@ int CppInterpreter::method_handle_entry(methodOop method,
       // NB the x86 code for this (in methodHandles_x86.cpp, search for
       // "genericInvoker") is really really odd.  I'm hoping it's trying
       // to accomodate odd VM/class library combinations I can ignore.
-      oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
+      oop adapter = NULL; //FIXME: load the adapter from the CP cache
       if (adapter == NULL) {
         CALL_VM_NOCHECK_NOFIX(
           SharedRuntime::throw_WrongMethodTypeException(
             thread, method_type, mhtype));
@@ -761,7 +760,7 @@ void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
     return;
   }
   if (entry_kind != MethodHandles::_invokespecial_mh) {
-    int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
+    intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
     instanceKlass* rcvrKlass =
       (instanceKlass *) receiver->klass()->klass_part();
     if (entry_kind == MethodHandles::_invokevirtual_mh) {
@@ -1179,8 +1178,7 @@ BasicType CppInterpreter::result_type_of_handle(oop method_handle) {
 intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack,
                                               oop method_handle) {
   oop method_type = java_lang_invoke_MethodHandle::type(method_handle);
-  oop form = java_lang_invoke_MethodType::form(method_type);
-  int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form);
+  int argument_slots = java_lang_invoke_MethodType::ptype_slot_count(method_type);

   return stack->sp() + argument_slots;
 }
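calculate_unwind_sp now sizes the frame straight from the MethodType instead of going through the removed MethodTypeForm. The slot count follows the standard JVM rule; a hedged model of what ptype_slot_count is assumed to compute:

#include <string>
#include <vector>

// JVM argument-slot rule: long (J) and double (D) take two stack slots,
// everything else, including references, takes one.
static int slots_for(const std::string& descriptor) {
  return (descriptor == "J" || descriptor == "D") ? 2 : 1;
}

// Model of java_lang_invoke_MethodType::ptype_slot_count(): total slots
// across the parameter types. E.g. (int, long, Object) -> 1 + 2 + 1 = 4.
int ptype_slot_count(const std::vector<std::string>& ptypes) {
  int slots = 0;
  for (const std::string& d : ptypes)
    slots += slots_for(d);
  return slots;
}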


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -60,4 +60,7 @@ define_pd_global(bool, UseMembar, false);
 // GC Ergo Flags
 define_pd_global(intx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread

+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
+
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
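Zero now supplies an (empty) ARCH_FLAGS list so the shared flags machinery has a definition to expand. A sketch of the X-macro pattern being satisfied, with a made-up flag to show what a non-empty expansion looks like:

#include <cstdio>

// Each platform defines ARCH_FLAGS(develop, product, diagnostic, experimental,
// notproduct); shared code expands it once per use site. Zero's list is empty,
// but a port with arch-specific flags would populate it like this:
#define MY_ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
  product(bool, UseHypotheticalFeature, false,                                \
          "Illustrative arch-specific flag")

// One possible expansion: define storage for every product flag.
#define DEFINE_PRODUCT_FLAG(type, name, value, doc) type name = value;
#define IGNORE_FLAG(type, name, value, doc)

MY_ARCH_FLAGS(IGNORE_FLAG, DEFINE_PRODUCT_FLAG, IGNORE_FLAG, IGNORE_FLAG, IGNORE_FLAG)

int main() {
  printf("UseHypotheticalFeature=%d\n", (int)UseHypotheticalFeature);
  return 0;
}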


@@ -38,6 +38,5 @@
   address generate_empty_entry();
   address generate_accessor_entry();
   address generate_Reference_get_entry();
-  address generate_method_handle_entry();

 #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

Some files were not shown because too many files have changed in this diff.